From 699f5131aa06925d3189adb693d09384ec24a88e Mon Sep 17 00:00:00 2001 From: Huiyu Xie Date: Sun, 29 Sep 2024 17:31:28 -1000 Subject: [PATCH] Refactor tests based on new GPU cache (#60) * Start * Minor fix * Add tests * Fix errors from upstream * Add tests * Add tests * Add tests --- src/TrixiCUDA.jl | 3 +- src/solvers/common.jl | 2 +- src/solvers/containers_3d.jl | 1 - src/solvers/dg_1d.jl | 29 +- src/solvers/dg_2d.jl | 33 +- src/solvers/dg_3d.jl | 144 +++---- test/dgsem_tree_test_suite.jl | 280 ------------- test/runtests.jl | 16 +- test/test_adevction_mortar.jl | 255 ------------ test/test_advection_basic.jl | 344 ---------------- test/test_euler_dirichlet.jl | 370 ----------------- test/test_euler_ec.jl | 347 ---------------- test/test_euler_shockcapturing.jl | 379 ------------------ test/test_euler_source_terms.jl | 346 ---------------- test/test_eulermulti_ec.jl | 233 ----------- test/test_hypdiff_nonperiodic.jl | 368 ----------------- test/{test_trixicuda.jl => test_macros.jl} | 38 +- test/test_mhd_alfven_wave_mortar.jl | 263 ------------ test/test_mhd_ec.jl | 352 ---------------- test/test_mhd_shockcapturing.jl | 277 ------------- test/test_script.jl | 71 ++-- test/test_shallowwater_dirichlet.jl | 244 ----------- test/test_shallowwater_ec.jl | 298 -------------- test/test_shallowwater_shockcapturing.jl | 123 ------ test/test_shallowwater_source_terms.jl | 238 ----------- test/tree_dgsem_1d/advection_amr.jl | 133 ++++++ .../advection_amr_nonperiodic.jl | 141 +++++++ test/tree_dgsem_1d/advection_basic.jl | 134 +++++++ test/tree_dgsem_1d/advection_extended.jl | 139 +++++++ test/tree_dgsem_1d/burgers_basic.jl | 134 +++++++ .../tree_dgsem_1d/burgers_linear_stability.jl | 139 +++++++ test/tree_dgsem_1d/burgers_rarefraction.jl | 172 ++++++++ test/tree_dgsem_1d/burgers_shock.jl | 173 ++++++++ test/tree_dgsem_1d/euler_blast_wave.jl | 157 ++++++++ test/tree_dgsem_1d/euler_ec.jl | 134 +++++++ test/tree_dgsem_1d/euler_shock.jl | 143 +++++++ 
test/tree_dgsem_1d/euler_source_terms.jl | 134 +++++++ .../euler_source_terms_nonperiodic.jl | 141 +++++++ test/tree_dgsem_1d/eulermulti_ec.jl | 135 +++++++ test/tree_dgsem_1d/eulermulti_es.jl | 135 +++++++ test/tree_dgsem_1d/eulerquasi_ec.jl | 144 +++++++ test/tree_dgsem_1d/eulerquasi_source_terms.jl | 137 +++++++ .../hypdiff_harmonic_nonperiodic.jl | 153 +++++++ test/tree_dgsem_1d/hypdiff_nonperiodic.jl | 139 +++++++ test/tree_dgsem_1d/mhd_alfven_wave.jl | 135 +++++++ test/tree_dgsem_1d/mhd_ec.jl | 135 +++++++ test/tree_dgsem_1d/shallowwater_shock.jl | 176 ++++++++ test/tree_dgsem_1d/tree_dgsem_1d.jl | 22 + test/tree_dgsem_2d/advection_basic.jl | 157 ++++++++ test/tree_dgsem_2d/advection_mortar.jl | 158 ++++++++ test/tree_dgsem_2d/euler_shock.jl | 166 ++++++++ test/tree_dgsem_2d/euler_source_terms.jl | 156 +++++++ .../euler_source_terms_nonperiodic.jl | 166 ++++++++ test/tree_dgsem_2d/euler_vortex_mortar.jl | 185 +++++++++ test/tree_dgsem_2d/eulermulti_ec.jl | 158 ++++++++ test/tree_dgsem_2d/eulermulti_es.jl | 158 ++++++++ test/tree_dgsem_2d/hypdiff_nonperiodic.jl | 165 ++++++++ test/tree_dgsem_2d/mhd_alfven_wave.jl | 159 ++++++++ test/tree_dgsem_2d/mhd_alfven_wave_mortar.jl | 163 ++++++++ test/tree_dgsem_2d/mhd_shock.jl | 169 ++++++++ test/tree_dgsem_2d/shallowwater_ec.jl | 158 ++++++++ .../shallowwater_source_terms.jl | 161 ++++++++ .../shawllowwater_source_terms_nonperiodic.jl | 165 ++++++++ test/tree_dgsem_2d/tree_dgsem_2d.jl | 15 + test/tree_dgsem_3d/advection_basic.jl | 159 ++++++++ test/tree_dgsem_3d/advection_mortar.jl | 162 ++++++++ test/tree_dgsem_3d/euler_convergence.jl | 160 ++++++++ test/tree_dgsem_3d/euler_ec.jl | 159 ++++++++ test/tree_dgsem_3d/euler_mortar.jl | 161 ++++++++ test/tree_dgsem_3d/euler_shock.jl | 170 ++++++++ test/tree_dgsem_3d/euler_source_terms.jl | 160 ++++++++ test/tree_dgsem_3d/hypdiff_nonperiodic.jl | 168 ++++++++ test/tree_dgsem_3d/mhd_alfven_wave.jl | 160 ++++++++ test/tree_dgsem_3d/mhd_alfven_wave_mortar.jl | 164 ++++++++ 
test/tree_dgsem_3d/mhd_ec.jl | 160 ++++++++ test/tree_dgsem_3d/mhd_shock.jl | 171 ++++++++ test/tree_dgsem_3d/tree_dgsem_3d.jl | 12 + 77 files changed, 7777 insertions(+), 4887 deletions(-) delete mode 100644 test/dgsem_tree_test_suite.jl delete mode 100644 test/test_adevction_mortar.jl delete mode 100644 test/test_advection_basic.jl delete mode 100644 test/test_euler_dirichlet.jl delete mode 100644 test/test_euler_ec.jl delete mode 100644 test/test_euler_shockcapturing.jl delete mode 100644 test/test_euler_source_terms.jl delete mode 100644 test/test_eulermulti_ec.jl delete mode 100644 test/test_hypdiff_nonperiodic.jl rename test/{test_trixicuda.jl => test_macros.jl} (71%) delete mode 100644 test/test_mhd_alfven_wave_mortar.jl delete mode 100644 test/test_mhd_ec.jl delete mode 100644 test/test_mhd_shockcapturing.jl delete mode 100644 test/test_shallowwater_dirichlet.jl delete mode 100644 test/test_shallowwater_ec.jl delete mode 100644 test/test_shallowwater_shockcapturing.jl delete mode 100644 test/test_shallowwater_source_terms.jl create mode 100644 test/tree_dgsem_1d/advection_amr.jl create mode 100644 test/tree_dgsem_1d/advection_amr_nonperiodic.jl create mode 100644 test/tree_dgsem_1d/advection_basic.jl create mode 100644 test/tree_dgsem_1d/advection_extended.jl create mode 100644 test/tree_dgsem_1d/burgers_basic.jl create mode 100644 test/tree_dgsem_1d/burgers_linear_stability.jl create mode 100644 test/tree_dgsem_1d/burgers_rarefraction.jl create mode 100644 test/tree_dgsem_1d/burgers_shock.jl create mode 100644 test/tree_dgsem_1d/euler_blast_wave.jl create mode 100644 test/tree_dgsem_1d/euler_ec.jl create mode 100644 test/tree_dgsem_1d/euler_shock.jl create mode 100644 test/tree_dgsem_1d/euler_source_terms.jl create mode 100644 test/tree_dgsem_1d/euler_source_terms_nonperiodic.jl create mode 100644 test/tree_dgsem_1d/eulermulti_ec.jl create mode 100644 test/tree_dgsem_1d/eulermulti_es.jl create mode 100644 test/tree_dgsem_1d/eulerquasi_ec.jl create mode 
100644 test/tree_dgsem_1d/eulerquasi_source_terms.jl create mode 100644 test/tree_dgsem_1d/hypdiff_harmonic_nonperiodic.jl create mode 100644 test/tree_dgsem_1d/hypdiff_nonperiodic.jl create mode 100644 test/tree_dgsem_1d/mhd_alfven_wave.jl create mode 100644 test/tree_dgsem_1d/mhd_ec.jl create mode 100644 test/tree_dgsem_1d/shallowwater_shock.jl create mode 100644 test/tree_dgsem_1d/tree_dgsem_1d.jl create mode 100644 test/tree_dgsem_2d/advection_basic.jl create mode 100644 test/tree_dgsem_2d/advection_mortar.jl create mode 100644 test/tree_dgsem_2d/euler_shock.jl create mode 100644 test/tree_dgsem_2d/euler_source_terms.jl create mode 100644 test/tree_dgsem_2d/euler_source_terms_nonperiodic.jl create mode 100644 test/tree_dgsem_2d/euler_vortex_mortar.jl create mode 100644 test/tree_dgsem_2d/eulermulti_ec.jl create mode 100644 test/tree_dgsem_2d/eulermulti_es.jl create mode 100644 test/tree_dgsem_2d/hypdiff_nonperiodic.jl create mode 100644 test/tree_dgsem_2d/mhd_alfven_wave.jl create mode 100644 test/tree_dgsem_2d/mhd_alfven_wave_mortar.jl create mode 100644 test/tree_dgsem_2d/mhd_shock.jl create mode 100644 test/tree_dgsem_2d/shallowwater_ec.jl create mode 100644 test/tree_dgsem_2d/shallowwater_source_terms.jl create mode 100644 test/tree_dgsem_2d/shawllowwater_source_terms_nonperiodic.jl create mode 100644 test/tree_dgsem_2d/tree_dgsem_2d.jl create mode 100644 test/tree_dgsem_3d/advection_basic.jl create mode 100644 test/tree_dgsem_3d/advection_mortar.jl create mode 100644 test/tree_dgsem_3d/euler_convergence.jl create mode 100644 test/tree_dgsem_3d/euler_ec.jl create mode 100644 test/tree_dgsem_3d/euler_mortar.jl create mode 100644 test/tree_dgsem_3d/euler_shock.jl create mode 100644 test/tree_dgsem_3d/euler_source_terms.jl create mode 100644 test/tree_dgsem_3d/hypdiff_nonperiodic.jl create mode 100644 test/tree_dgsem_3d/mhd_alfven_wave.jl create mode 100644 test/tree_dgsem_3d/mhd_alfven_wave_mortar.jl create mode 100644 test/tree_dgsem_3d/mhd_ec.jl create mode 
100644 test/tree_dgsem_3d/mhd_shock.jl create mode 100644 test/tree_dgsem_3d/tree_dgsem_3d.jl diff --git a/src/TrixiCUDA.jl b/src/TrixiCUDA.jl index 2a82993..067b05a 100644 --- a/src/TrixiCUDA.jl +++ b/src/TrixiCUDA.jl @@ -14,7 +14,8 @@ using Trixi: AbstractEquations, AbstractContainer, L2MortarContainer2D, L2MortarContainer3D, True, False, TreeMesh, DGSEM, - BoundaryConditionPeriodic, SemidiscretizationHyperbolic, + SemidiscretizationHyperbolic, + BoundaryConditionPeriodic, BoundaryConditionDirichlet, VolumeIntegralWeakForm, VolumeIntegralFluxDifferencing, VolumeIntegralShockCapturingHG, LobattoLegendreMortarL2, flux, ntuple, nvariables, nnodes, nelements, nmortars, diff --git a/src/solvers/common.jl b/src/solvers/common.jl index f1f3d7c..15f9c40 100644 --- a/src/solvers/common.jl +++ b/src/solvers/common.jl @@ -2,7 +2,7 @@ # Copy data from CPU to GPU function copy_to_gpu!(du::PtrArray, u::PtrArray) - du = CuArray{Float64}(zero(du)) + du = CUDA.zeros(Float64, size(du)...) u = CuArray{Float64}(u) return (du, u) diff --git a/src/solvers/containers_3d.jl b/src/solvers/containers_3d.jl index 9b45dc5..c353e61 100644 --- a/src/solvers/containers_3d.jl +++ b/src/solvers/containers_3d.jl @@ -9,7 +9,6 @@ mutable struct ElementContainerGPU3D{RealT <: Real, uEltype <: Real} <: Abstract surface_flux_values::CuArray{uEltype, 5} cell_ids::CuArray{Int, 1} - # Inner constructor # Inner constructor function ElementContainerGPU3D{RealT, uEltype}(dims_inverse_jacobian::NTuple{1, Int}, dims_node_coordinates::NTuple{5, Int}, diff --git a/src/solvers/dg_1d.jl b/src/solvers/dg_1d.jl index 45d3a89..cefff53 100644 --- a/src/solvers/dg_1d.jl +++ b/src/solvers/dg_1d.jl @@ -703,8 +703,8 @@ function cuda_volume_integral!(du, u, mesh::TreeMesh{1}, nonconservative_terms:: # For `Float32`, this gives 1.1920929f-5 atol = 1.8189894035458565e-12 # see also `pure_and_blended_element_ids!` in Trixi.jl - element_ids_dg = zero(CuArray{Int64}(undef, length(alpha))) - element_ids_dgfv = 
zero(CuArray{Int64}(undef, length(alpha))) + element_ids_dg = CUDA.zeros(Int, length(alpha)) + element_ids_dgfv = CUDA.zeros(Int, length(alpha)) pure_blended_element_count_kernel = @cuda launch=false pure_blended_element_count_kernel!(element_ids_dg, element_ids_dgfv, @@ -773,8 +773,8 @@ function cuda_volume_integral!(du, u, mesh::TreeMesh{1}, nonconservative_terms:: # For `Float32`, this gives 1.1920929f-5 atol = 1.8189894035458565e-12 # see also `pure_and_blended_element_ids!` in Trixi.jl - element_ids_dg = zero(CuArray{Int64}(undef, length(alpha))) - element_ids_dgfv = zero(CuArray{Int64}(undef, length(alpha))) + element_ids_dg = CUDA.zeros(Int, length(alpha)) + element_ids_dgfv = CUDA.zeros(Int, length(alpha)) pure_blended_element_count_kernel = @cuda launch=false pure_blended_element_count_kernel!(element_ids_dg, element_ids_dgfv, @@ -966,14 +966,15 @@ function cuda_boundary_flux!(t, mesh::TreeMesh{1}, boundary_conditions::NamedTup lasts = zero(n_boundaries_per_direction) firsts = zero(n_boundaries_per_direction) + # May introduce kernel launching overhead last_first_indices_kernel = @cuda launch=false last_first_indices_kernel!(lasts, firsts, n_boundaries_per_direction) last_first_indices_kernel(lasts, firsts, n_boundaries_per_direction; configurator_1d(last_first_indices_kernel, lasts)...) 
- lasts, firsts = Array(lasts), Array(firsts) - boundary_arr = CuArray{Int64}(firsts[1]:lasts[2]) - indices_arr = CuArray{Int64}([firsts[1], firsts[2]]) + indices_arr = firsts + boundary_arr = CuArray{Int}(Array(firsts)[1]:Array(lasts)[end]) + boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) boundary_flux_kernel = @cuda launch=false boundary_flux_kernel!(surface_flux_values, @@ -1009,30 +1010,30 @@ function cuda_boundary_flux!(t, mesh::TreeMesh{1}, boundary_conditions::NamedTup lasts = zero(n_boundaries_per_direction) firsts = zero(n_boundaries_per_direction) + # May introduce kernel launching overhead last_first_indices_kernel = @cuda launch=false last_first_indices_kernel!(lasts, firsts, n_boundaries_per_direction) last_first_indices_kernel(lasts, firsts, n_boundaries_per_direction; configurator_1d(last_first_indices_kernel, lasts)...) - lasts, firsts = Array(lasts), Array(firsts) - boundary_arr = CuArray{Int64}(firsts[1]:lasts[2]) - indices_arr = CuArray{Int64}([firsts[1], firsts[2]]) + indices_arr = firsts + boundary_arr = CuArray{Int}(Array(firsts)[1]:Array(lasts)[end]) - # Replace with callable functions (not necessary here) - # boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) + boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) boundary_flux_kernel = @cuda launch=false boundary_flux_kernel!(surface_flux_values, boundaries_u, node_coordinates, t, boundary_arr, indices_arr, neighbor_ids, neighbor_sides, orientations, - boundary_conditions, + boundary_conditions_callable, equations, surface_flux, nonconservative_flux) boundary_flux_kernel(surface_flux_values, boundaries_u, node_coordinates, t, boundary_arr, indices_arr, neighbor_ids, neighbor_sides, orientations, - boundary_conditions, equations, surface_flux, nonconservative_flux; + boundary_conditions_callable, equations, surface_flux, + nonconservative_flux; configurator_1d(boundary_flux_kernel, boundary_arr)...) 
return nothing diff --git a/src/solvers/dg_2d.jl b/src/solvers/dg_2d.jl index 2158700..8444764 100644 --- a/src/solvers/dg_2d.jl +++ b/src/solvers/dg_2d.jl @@ -1125,8 +1125,8 @@ function cuda_volume_integral!(du, u, mesh::TreeMesh{2}, nonconservative_terms:: # For `Float32`, this gives 1.1920929f-5 atol = 1.8189894035458565e-12 # see also `pure_and_blended_element_ids!` in Trixi.jl - element_ids_dg = zero(CuArray{Int64}(undef, length(alpha))) - element_ids_dgfv = zero(CuArray{Int64}(undef, length(alpha))) + element_ids_dg = CUDA.zeros(Int, length(alpha)) + element_ids_dgfv = CUDA.zeros(Int, length(alpha)) pure_blended_element_count_kernel = @cuda launch=false pure_blended_element_count_kernel!(element_ids_dg, element_ids_dgfv, @@ -1209,8 +1209,8 @@ function cuda_volume_integral!(du, u, mesh::TreeMesh{2}, nonconservative_terms:: # For `Float32`, this gives 1.1920929f-5 atol = 1.8189894035458565e-12 # see also `pure_and_blended_element_ids!` in Trixi.jl - element_ids_dg = zero(CuArray{Int64}(undef, length(alpha))) - element_ids_dgfv = zero(CuArray{Int64}(undef, length(alpha))) + element_ids_dg = CUDA.zeros(Int, length(alpha)) + element_ids_dgfv = CUDA.zeros(Int, length(alpha)) pure_blended_element_count_kernel = @cuda launch=false pure_blended_element_count_kernel!(element_ids_dg, element_ids_dgfv, @@ -1440,16 +1440,16 @@ function cuda_boundary_flux!(t, mesh::TreeMesh{2}, boundary_conditions::NamedTup lasts = zero(n_boundaries_per_direction) firsts = zero(n_boundaries_per_direction) + # May introduce kernel launching overhead last_first_indices_kernel = @cuda launch=false last_first_indices_kernel!(lasts, firsts, n_boundaries_per_direction) last_first_indices_kernel(lasts, firsts, n_boundaries_per_direction; configurator_1d(last_first_indices_kernel, lasts)...) 
- lasts, firsts = Array(lasts), Array(firsts) - boundary_arr = CuArray{Int64}(firsts[1]:lasts[4]) - indices_arr = CuArray{Int64}([firsts[1], firsts[2], firsts[3], firsts[4]]) - boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) + indices_arr = firsts + boundary_arr = CuArray{Int}(Array(firsts)[1]:Array(lasts)[end]) + boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) size_arr = CuArray{Float64}(undef, size(surface_flux_values, 2), length(boundary_arr)) boundary_flux_kernel = @cuda launch=false boundary_flux_kernel!(surface_flux_values, @@ -1485,18 +1485,16 @@ function cuda_boundary_flux!(t, mesh::TreeMesh{2}, boundary_conditions::NamedTup lasts = zero(n_boundaries_per_direction) firsts = zero(n_boundaries_per_direction) + # May introduce kernel launching overhead last_first_indices_kernel = @cuda launch=false last_first_indices_kernel!(lasts, firsts, n_boundaries_per_direction) last_first_indices_kernel(lasts, firsts, n_boundaries_per_direction; configurator_1d(last_first_indices_kernel, lasts)...) 
- lasts, firsts = Array(lasts), Array(firsts) - boundary_arr = CuArray{Int64}(firsts[1]:lasts[4]) - indices_arr = CuArray{Int64}([firsts[1], firsts[2], firsts[3], firsts[4]]) - - # Replace with callable functions (not necessary here) - # boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) + indices_arr = firsts + boundary_arr = CuArray{Int}(Array(firsts)[1]:Array(lasts)[end]) + boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) size_arr = CuArray{Float64}(undef, size(surface_flux_values, 2), length(boundary_arr)) boundary_flux_kernel = @cuda launch=false boundary_flux_kernel!(surface_flux_values, @@ -1504,13 +1502,14 @@ function cuda_boundary_flux!(t, mesh::TreeMesh{2}, boundary_conditions::NamedTup t, boundary_arr, indices_arr, neighbor_ids, neighbor_sides, orientations, - boundary_conditions, + boundary_conditions_callable, equations, surface_flux, nonconservative_flux) boundary_flux_kernel(surface_flux_values, boundaries_u, node_coordinates, t, boundary_arr, indices_arr, neighbor_ids, neighbor_sides, orientations, - boundary_conditions, equations, surface_flux, nonconservative_flux; + boundary_conditions_callable, equations, surface_flux, + nonconservative_flux; configurator_2d(boundary_flux_kernel, size_arr)...) 
return nothing @@ -1577,7 +1576,7 @@ function cuda_mortar_flux!(mesh::TreeMesh{2}, cache_mortars::True, nonconservati large_sides = cache.mortars.large_sides orientations = cache.mortars.orientations - # + # The original CPU arrays hold NaNs u_upper = cache.mortars.u_upper u_lower = cache.mortars.u_lower reverse_upper = CuArray{Float64}(dg.mortar.reverse_upper) diff --git a/src/solvers/dg_3d.jl b/src/solvers/dg_3d.jl index d3f7cc9..869e378 100644 --- a/src/solvers/dg_3d.jl +++ b/src/solvers/dg_3d.jl @@ -633,6 +633,35 @@ function surface_flux_kernel!(surface_flux_arr, interfaces_u, orientations, return nothing end +# Kernel for calculating surface and both nonconservative fluxes +function surface_noncons_flux_kernel!(surface_flux_arr, noncons_left_arr, noncons_right_arr, + interfaces_u, orientations, equations::AbstractEquations{3}, + surface_flux::Any, nonconservative_flux::Any) + j1 = (blockIdx().x - 1) * blockDim().x + threadIdx().x + j2 = (blockIdx().y - 1) * blockDim().y + threadIdx().y + k = (blockIdx().z - 1) * blockDim().z + threadIdx().z + + if (j1 <= size(surface_flux_arr, 2) && j2 <= size(surface_flux_arr, 3) && + k <= size(surface_flux_arr, 4)) + u_ll, u_rr = get_surface_node_vars(interfaces_u, equations, j1, j2, k) + orientation = orientations[k] + + surface_flux_node = surface_flux(u_ll, u_rr, orientation, equations) + noncons_left_node = nonconservative_flux(u_ll, u_rr, orientation, equations) + noncons_right_node = nonconservative_flux(u_rr, u_ll, orientation, equations) + + @inbounds begin + for ii in axes(surface_flux_arr, 1) + surface_flux_arr[ii, j1, j2, k] = surface_flux_node[ii] + noncons_left_arr[ii, j1, j2, k] = noncons_left_node[ii] + noncons_right_arr[ii, j1, j2, k] = noncons_right_node[ii] + end + end + end + + return nothing +end + # Kernel for setting interface fluxes function interface_flux_kernel!(surface_flux_values, surface_flux_arr, neighbor_ids, orientations, equations::AbstractEquations{3}) @@ -661,6 +690,41 @@ function 
interface_flux_kernel!(surface_flux_values, surface_flux_arr, neighbor_ return nothing end +# Kernel for setting interface fluxes +function interface_flux_kernel!(surface_flux_values, surface_flux_arr, noncons_left_arr, + noncons_right_arr, neighbor_ids, orientations, + equations::AbstractEquations{3}) + i = (blockIdx().x - 1) * blockDim().x + threadIdx().x + j = (blockIdx().y - 1) * blockDim().y + threadIdx().y + k = (blockIdx().z - 1) * blockDim().z + threadIdx().z + + if (i <= size(surface_flux_values, 1) && j <= size(surface_flux_arr, 2)^2 && + k <= size(surface_flux_arr, 4)) + j1 = div(j - 1, size(surface_flux_arr, 2)) + 1 + j2 = rem(j - 1, size(surface_flux_arr, 2)) + 1 + + left_id = neighbor_ids[1, k] + right_id = neighbor_ids[2, k] + + left_direction = 2 * orientations[k] + right_direction = 2 * orientations[k] - 1 + + @inbounds begin + surface_flux_values[i, j1, j2, left_direction, left_id] = surface_flux_arr[i, j1, j2, + k] + + 0.5 * + noncons_left_arr[i, j1, j2, k] + surface_flux_values[i, j1, j2, right_direction, right_id] = surface_flux_arr[i, j1, j2, + k] + + 0.5 * + noncons_right_arr[i, j1, j2, + k] + end + end + + return nothing +end + # Kernel for prolonging two boundaries function prolong_boundaries_kernel!(boundaries_u, u, neighbor_ids, neighbor_sides, orientations, equations::AbstractEquations{3}) @@ -1451,8 +1515,8 @@ function cuda_volume_integral!(du, u, mesh::TreeMesh{3}, nonconservative_terms:: # For `Float32`, this gives 1.1920929f-5 atol = 1.8189894035458565e-12 # see also `pure_and_blended_element_ids!` in Trixi.jl - element_ids_dg = zero(CuArray{Int64}(undef, length(alpha))) - element_ids_dgfv = zero(CuArray{Int64}(undef, length(alpha))) + element_ids_dg = CUDA.zeros(Int, length(alpha)) + element_ids_dgfv = CUDA.zeros(Int, length(alpha)) pure_blended_element_count_kernel = @cuda launch=false pure_blended_element_count_kernel!(element_ids_dg, element_ids_dgfv, @@ -1544,8 +1608,8 @@ function cuda_volume_integral!(du, u, mesh::TreeMesh{3}, 
nonconservative_terms:: # For `Float32`, this gives 1.1920929f-5 atol = 1.8189894035458565e-12 # see also `pure_and_blended_element_ids!` in Trixi.jl - element_ids_dg = zero(CuArray{Int64}(undef, length(alpha))) - element_ids_dgfv = zero(CuArray{Int64}(undef, length(alpha))) + element_ids_dg = CUDA.zeros(Int, length(alpha)) + element_ids_dgfv = CUDA.zeros(Int, length(alpha)) pure_blended_element_count_kernel = @cuda launch=false pure_blended_element_count_kernel!(element_ids_dg, element_ids_dgfv, @@ -1693,70 +1757,6 @@ function cuda_interface_flux!(mesh::TreeMesh{3}, nonconservative_terms::False, e return nothing end -# Kernel for calculating surface and both nonconservative fluxes -function surface_noncons_flux_kernel!(surface_flux_arr, noncons_left_arr, noncons_right_arr, - interfaces_u, orientations, equations::AbstractEquations{3}, - surface_flux::Any, nonconservative_flux::Any) - j1 = (blockIdx().x - 1) * blockDim().x + threadIdx().x - j2 = (blockIdx().y - 1) * blockDim().y + threadIdx().y - k = (blockIdx().z - 1) * blockDim().z + threadIdx().z - - if (j1 <= size(surface_flux_arr, 2) && j2 <= size(surface_flux_arr, 3) && - k <= size(surface_flux_arr, 4)) - u_ll, u_rr = get_surface_node_vars(interfaces_u, equations, j1, j2, k) - orientation = orientations[k] - - surface_flux_node = surface_flux(u_ll, u_rr, orientation, equations) - noncons_left_node = nonconservative_flux(u_ll, u_rr, orientation, equations) - noncons_right_node = nonconservative_flux(u_rr, u_ll, orientation, equations) - - @inbounds begin - for ii in axes(surface_flux_arr, 1) - surface_flux_arr[ii, j1, j2, k] = surface_flux_node[ii] - noncons_left_arr[ii, j1, j2, k] = noncons_left_node[ii] - noncons_right_arr[ii, j1, j2, k] = noncons_right_node[ii] - end - end - end - - return nothing -end - -# Kernel for setting interface fluxes -function interface_flux_kernel!(surface_flux_values, surface_flux_arr, noncons_left_arr, - noncons_right_arr, neighbor_ids, orientations, - 
equations::AbstractEquations{3}) - i = (blockIdx().x - 1) * blockDim().x + threadIdx().x - j = (blockIdx().y - 1) * blockDim().y + threadIdx().y - k = (blockIdx().z - 1) * blockDim().z + threadIdx().z - - if (i <= size(surface_flux_values, 1) && j <= size(surface_flux_arr, 2)^2 && - k <= size(surface_flux_arr, 4)) - j1 = div(j - 1, size(surface_flux_arr, 2)) + 1 - j2 = rem(j - 1, size(surface_flux_arr, 2)) + 1 - - left_id = neighbor_ids[1, k] - right_id = neighbor_ids[2, k] - - left_direction = 2 * orientations[k] - right_direction = 2 * orientations[k] - 1 - - @inbounds begin - surface_flux_values[i, j1, j2, left_direction, left_id] = surface_flux_arr[i, j1, j2, - k] + - 0.5 * - noncons_left_arr[i, j1, j2, k] - surface_flux_values[i, j1, j2, right_direction, right_id] = surface_flux_arr[i, j1, j2, - k] + - 0.5 * - noncons_right_arr[i, j1, j2, - k] - end - end - - return nothing -end - # Pack kernels for calculating interface fluxes function cuda_interface_flux!(mesh::TreeMesh{3}, nonconservative_terms::True, equations, dg::DGSEM, cache) @@ -1855,16 +1855,16 @@ function cuda_boundary_flux!(t, mesh::TreeMesh{3}, boundary_conditions::NamedTup lasts = zero(n_boundaries_per_direction) firsts = zero(n_boundaries_per_direction) + # May introduce kernel launching overhead last_first_indices_kernel = @cuda launch=false last_first_indices_kernel!(lasts, firsts, n_boundaries_per_direction) last_first_indices_kernel(lasts, firsts, n_boundaries_per_direction; configurator_1d(last_first_indices_kernel, lasts)...) 
- lasts, firsts = Array(lasts), Array(firsts) - boundary_arr = CuArray{Int64}(firsts[1]:lasts[6]) - indices_arr = CuArray{Int64}([firsts[1], firsts[2], firsts[3], firsts[4], firsts[5], firsts[6]]) - boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) + indices_arr = firsts + boundary_arr = CuArray{Int}(Array(firsts)[1]:Array(lasts)[end]) + boundary_conditions_callable = replace_boundary_conditions(boundary_conditions) size_arr = CuArray{Float64}(undef, size(surface_flux_values, 2)^2, length(boundary_arr)) boundary_flux_kernel = @cuda launch=false boundary_flux_kernel!(surface_flux_values, diff --git a/test/dgsem_tree_test_suite.jl b/test/dgsem_tree_test_suite.jl deleted file mode 100644 index b7acc01..0000000 --- a/test/dgsem_tree_test_suite.jl +++ /dev/null @@ -1,280 +0,0 @@ -# Test suite for DGSEM solver with tree mesh on 1D, 2D, and 3D problems. - -# Test suite for 1D problems -function run_dgsem_tree_tests_1D(semi, tspan) - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, 
Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - 
TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) -end - -# Test suite for 2D problems -function run_dgsem_tree_tests_2D(semi, tspan) - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - 
Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, 
solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) -end - -# Test suite for 3D problems -function run_dgsem_tree_tests_3D(semi, tspan) - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - 
@test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - 
Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) -end diff --git a/test/runtests.jl b/test/runtests.jl index a1c6c22..aa306ac 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,9 +1,11 @@ -using TrixiCUDA -using Test +module TestTrixiCUDA -# Note that it is complicated to get tight error bounds for GPU kernels, here we use `isapprox` -# with the default mode to validate the precision by comparing the results from GPU kernels and -# CPU kernels, which corresponds to requiring equality of about half of the significant digits -# (see https://docs.julialang.org/en/v1/base/math/#Base.isapprox). 
+using Test: @testset -@testset "TrixiCUDA.jl" begin end +@testset "TrixiCUDA.jl" begin + # include("./tree_dgsem_1d/tree_dgsem_1d.jl") + # include("./tree_dgsem_2d/tree_dgsem_2d.jl") + # include("./tree_dgsem_3d/tree_dgsem_3d.jl") +end + +end diff --git a/test/test_adevction_mortar.jl b/test/test_adevction_mortar.jl deleted file mode 100644 index 2a9b72e..0000000 --- a/test/test_adevction_mortar.jl +++ /dev/null @@ -1,255 +0,0 @@ -module TestLinearAdvectionMortar - -####################################################################### Tags -# Kernels: -# -`cuda_prolong2mortars!` -# - `cuda_mortar_flux!` -# Conditions: -# - `nonconservative_terms::False` -# - `cache_mortars::True` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Linear Advection" begin - @testset "Linear Advection 2D" begin - advection_velocity = (0.2, -0.7) - equations = LinearScalarAdvectionEquation2D(advection_velocity) - - initial_condition = initial_condition_convergence_test - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - - coordinates_min = (-1.0, -1.0) - coordinates_max = (1.0, 1.0) - refinement_patches = ((type = "box", coordinates_min = (0.0, -1.0), - coordinates_max = (1.0, 1.0)),) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 2, - refinement_patches = refinement_patches, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get 
initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, 
equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache.mortars.u_upper ≈ cache_gpu.mortars.u_upper - @test_approx cache.mortars.u_lower ≈ cache_gpu.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Linear AdVection 3D" begin - advection_velocity = (0.2, -0.7, 0.5) - equations = LinearScalarAdvectionEquation3D(advection_velocity) - - initial_condition = initial_condition_convergence_test - solver = DGSEM(polydeg = 3, surface_flux = 
flux_lax_friedrichs) - - coordinates_min = (-1.0, -1.0, -1.0) - coordinates_max = (1.0, 1.0, 1.0) - refinement_patches = ((type = "box", coordinates_min = (0.0, -1.0, -1.0), - coordinates_max = (1.0, 1.0, 1.0)), - (type = "box", coordinates_min = (0.0, -0.5, -0.5), - coordinates_max = (0.5, 0.5, 0.5))) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 2, - refinement_patches = refinement_patches, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 5.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test 
`cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, 
- solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_advection_basic.jl b/test/test_advection_basic.jl deleted file mode 100644 index c0c2148..0000000 --- a/test/test_advection_basic.jl +++ /dev/null @@ -1,344 +0,0 @@ -module TestLinearAdvectionBasic - -####################################################################### Tags -# Kernels: -# - `cuda_xx!` (most basic kernels) -# Conditions: -# - `nonconservative_terms::False` -# - `volume_integral::VolumeIntegralWeakForm` -# - `periodicity = true` 1D, 2D, 3D -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Linear Advection" begin - @testset "Linear Advection 1D" begin - advection_velocity = 1.0 - equations = LinearScalarAdvectionEquation1D(advection_velocity) - - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - - coordinates_min = -1.0 - coordinates_max = 1.0 - - mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 4, - n_cells_max = 30_000) - - semi = 
SemidiscretizationHyperbolic(mesh, equations, - initial_condition_convergence_test, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` 
- TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Linear Advection 2D" begin - advection_velocity = (0.2, -0.7) - equations = LinearScalarAdvectionEquation2D(advection_velocity) - - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - - coordinates_min = (-1.0, -1.0) - coordinates_max = (1.0, 1.0) - - mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 4, - n_cells_max = 30_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, - initial_condition_convergence_test, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, 
source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - 
@test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to 
host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Linear AdVection 3D" begin - advection_velocity = (0.2, -0.7, 0.5) - equations = LinearScalarAdvectionEquation3D(advection_velocity) - - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - - coordinates_min = (-1.0, -1.0, -1.0) - coordinates_max = (1.0, 1.0, 1.0) - - mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 3, - n_cells_max = 30_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, - initial_condition_convergence_test, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ 
cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - 
Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_euler_dirichlet.jl b/test/test_euler_dirichlet.jl deleted file mode 100644 index 6edc598..0000000 --- a/test/test_euler_dirichlet.jl +++ /dev/null @@ -1,370 +0,0 @@ -module TestCompressibleEulerBoundary - -####################################################################### Tags -# Kernels: -# -`cuda_prolong2boundaries!` -# - `cuda_boundary_flux!` -# Conditions: -# - `nonconservative_terms::False` -# - `periodicity = false` 1D, 2D, 3D - `BoundaryConditionDirichlet` -# - `source_terms` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Compressible Euler" begin - @testset "Compressible Euler 1D" begin - equations = CompressibleEulerEquations1D(1.4) - - initial_condition = initial_condition_convergence_test - - boundary_condition = BoundaryConditionDirichlet(initial_condition) - - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - - coordinates_min = 
(0.0,) - coordinates_max = (2.0,) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000, - periodicity = false) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test, - boundary_conditions = boundary_condition) - - tspan = (0.0, 2.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - 
Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 2D" begin - equations = CompressibleEulerEquations2D(1.4) - - initial_condition = initial_condition_convergence_test - - boundary_condition = BoundaryConditionDirichlet(initial_condition) - boundary_conditions = (x_neg = boundary_condition, - x_pos = 
boundary_condition, - y_neg = boundary_condition, - y_pos = boundary_condition) - - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - - coordinates_min = (0.0, 0.0) - coordinates_max = (2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000, - periodicity = false) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test, - boundary_conditions = boundary_conditions) - - tspan = (0.0, 2.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - 
TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - 
TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 3D" begin - equations = CompressibleEulerEquations3D(1.4) - - initial_condition = initial_condition_convergence_test - - boundary_condition = BoundaryConditionDirichlet(initial_condition) - boundary_conditions = (x_neg = boundary_condition, - x_pos = boundary_condition, - y_neg = boundary_condition, - y_pos = boundary_condition, - z_neg = boundary_condition, - z_pos = boundary_condition) - - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - - coordinates_min = (0.0, 0.0, 0.0) - coordinates_max = (2.0, 2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000, - periodicity = false) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test, - boundary_conditions = boundary_conditions) - - tspan = (0.0, 2.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu 
= 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, 
mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_euler_ec.jl 
b/test/test_euler_ec.jl deleted file mode 100644 index 07baf88..0000000 --- a/test/test_euler_ec.jl +++ /dev/null @@ -1,347 +0,0 @@ -module TestCompressibleEulerFluxDifferencing - -####################################################################### Tags -# Kernels: -# -`cuda_volume_integral!` -# Conditions: -# - `nonconservative_terms::False` -# - `volume_integral::VolumeIntegralFluxDifferencing` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Compressible Euler" begin - @testset "Compressible Euler 1D" begin - equations = CompressibleEulerEquations1D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = flux_ranocha - solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-2.0,) - coordinates_max = (2.0,) - mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 5, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - 
TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test 
`cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 2D" begin - equations = CompressibleEulerEquations2D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = flux_ranocha - solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-2.0, -2.0) - coordinates_max = (2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 5, - n_cells_max = 10_000, periodicity = true) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_condition_periodic) - - tspan = (0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, 
mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ 
cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 3D" begin - equations = CompressibleEulerEquations3D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = flux_ranocha - solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-2.0, -2.0, -2.0) - coordinates_max = (2.0, 2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 3, - n_cells_max = 100_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, 
solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx 
cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, 
source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_euler_shockcapturing.jl b/test/test_euler_shockcapturing.jl deleted file mode 100644 index b754ef1..0000000 --- a/test/test_euler_shockcapturing.jl +++ /dev/null @@ -1,379 +0,0 @@ - -module TestCompressibleEulerShock - -####################################################################### Tags -# Kernels: -# -`cuda_volume_integral!` -# Conditions: -# - `nonconservative_terms::False` -# - `volume_integral::VolumeIntegralShockCapturingHG` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Compressible Euler" begin - @testset "Compressible Euler 1D" begin - equations = CompressibleEulerEquations1D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - surface_flux = flux_lax_friedrichs - volume_flux = flux_shima_etal - basis = LobattoLegendreBasis(3) - indicator_sc = IndicatorHennemannGassner(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = density_pressure) - volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - solver = DGSEM(basis, surface_flux, volume_integral) - - coordinates_min = -2.0 - coordinates_max = 2.0 - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = 
deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, 
mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 2D" begin - equations = CompressibleEulerEquations2D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - surface_flux = flux_lax_friedrichs - volume_flux = flux_shima_etal - basis = LobattoLegendreBasis(3) - indicator_sc = IndicatorHennemannGassner(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = density_pressure) - volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - solver = DGSEM(basis, surface_flux, volume_integral) - - coordinates_min = (-2.0, -2.0) - coordinates_max = (2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, 
initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, 
equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - 
@test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 3D" begin - equations = CompressibleEulerEquations3D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - surface_flux = flux_ranocha # OBS! Using a non-dissipative flux is only sensible to test EC, - # but not for real shock simulations - volume_flux = flux_ranocha - polydeg = 3 - basis = LobattoLegendreBasis(polydeg) - indicator_sc = IndicatorHennemannGassner(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = density_pressure) - volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - solver = DGSEM(basis, surface_flux, volume_integral) - - coordinates_min = (-2.0, -2.0, -2.0) - coordinates_max = (2.0, 2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 100_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - 
Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - 
@test cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_euler_source_terms.jl b/test/test_euler_source_terms.jl deleted file mode 100644 index f07e289..0000000 --- a/test/test_euler_source_terms.jl +++ /dev/null @@ -1,346 +0,0 @@ -module TestCompressibleEulerSourceTerms - -####################################################################### Tags -# Kernels: -# -`cuda_sources!` -# Conditions: -# - `nonconservative_terms::False` -# - `source_terms` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of 
the semidiscretization process -@testset "Test Compressible Euler" begin - @testset "Compressible Euler 1D" begin - equations = CompressibleEulerEquations1D(1.4) - - initial_condition = initial_condition_convergence_test - - solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) - - coordinates_min = 0.0 - coordinates_max = 2.0 - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - - tspan = (0.0, 2.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u 
≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 2D" begin - equations = CompressibleEulerEquations2D(1.4) - - initial_condition 
= initial_condition_convergence_test - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) - - coordinates_min = (0.0, 0.0) - coordinates_max = (2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - - tspan = (0.0, 2.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, 
solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - 
Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 3D" begin - equations = CompressibleEulerEquations3D(1.4) - - initial_condition = initial_condition_convergence_test - - solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, - volume_integral = VolumeIntegralWeakForm()) - - coordinates_min = (0.0, 0.0, 0.0) - coordinates_max = (2.0, 2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 2, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - - tspan = (0.0, 5.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test 
`cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, 
solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_eulermulti_ec.jl b/test/test_eulermulti_ec.jl deleted file mode 100644 index fdb5726..0000000 --- a/test/test_eulermulti_ec.jl +++ /dev/null @@ -1,233 +0,0 @@ -module TestCompressibleEulerMultiFluxDifferencing - -####################################################################### Tags -# Kernels: -# -`cuda_volume_integral!` -# Conditions: -# - `nonconservative_terms::False` -# - 
`volume_integral::VolumeIntegralFluxDifferencing` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Compressible Euler Multicomponent" begin - @testset "Compressible Euler Multicomponent 1D" begin - equations = CompressibleEulerMulticomponentEquations1D(gammas = (1.4, 1.4, 1.4), - gas_constants = (0.4, 0.4, 0.4)) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = flux_ranocha - solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-2.0,) - coordinates_max = (2.0,) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), 
- equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, 
equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler Multicomponent 2D" begin - equations = CompressibleEulerMulticomponentEquations2D(gammas = 1.4, - gas_constants = 0.4) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = flux_ranocha - solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-2.0, -2.0) - coordinates_max = (2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - 
@test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, 
cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_hypdiff_nonperiodic.jl b/test/test_hypdiff_nonperiodic.jl deleted file mode 100644 index 2353618..0000000 --- a/test/test_hypdiff_nonperiodic.jl +++ /dev/null @@ -1,368 +0,0 @@ -module TestHyperbolicDiffusionBoundary - -####################################################################### Tags -# Kernels: -# -`cuda_prolong2boundaries!` -# - `cuda_boundary_flux!` -# Conditions: -# - `nonconservative_terms::False` -# - `periodicity = false` 1D -# - `periodicity = (false, true)` 2D -# - `periodicity = (false, true, true)` 3D -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Hyperbolic Diffusion" begin - @testset "Compressible Euler 1D" begin - equations = HyperbolicDiffusionEquations1D() - - initial_condition = initial_condition_poisson_nonperiodic - - boundary_conditions = 
boundary_condition_poisson_nonperiodic - - solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) - - coordinates_min = 0.0 - coordinates_max = 1.0 - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 30_000, - periodicity = false) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_conditions, - source_terms = source_terms_poisson_nonperiodic) - - tspan = (0.0, 5.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, 
Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 2D" begin - equations = HyperbolicDiffusionEquations2D() - - initial_condition = initial_condition_poisson_nonperiodic - - boundary_conditions = (x_neg = 
boundary_condition_poisson_nonperiodic, - x_pos = boundary_condition_poisson_nonperiodic, - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic) - - solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) - - coordinates_min = (0.0, 0.0) - coordinates_max = (1.0, 1.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 30_000, - periodicity = (false, true)) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_conditions, - source_terms = source_terms_poisson_nonperiodic) - - tspan = (0.0, 5.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, 
solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx 
cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Compressible Euler 3D" begin - equations = HyperbolicDiffusionEquations3D() - - initial_condition = initial_condition_poisson_nonperiodic - boundary_conditions = (x_neg = boundary_condition_poisson_nonperiodic, - x_pos = boundary_condition_poisson_nonperiodic, - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic, - z_neg = boundary_condition_periodic, - z_pos = boundary_condition_periodic) - - solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) - - coordinates_min = (0.0, 0.0, 0.0) - coordinates_max = (1.0, 1.0, 1.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 2, - n_cells_max = 30_000, - periodicity = (false, true, true)) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_poisson_nonperiodic, - boundary_conditions = boundary_conditions) - - tspan = (0.0, 5.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), 
deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, 
boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx 
du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_trixicuda.jl b/test/test_macros.jl similarity index 71% rename from test/test_trixicuda.jl rename to test/test_macros.jl index e35fa65..1cccb3c 100644 --- a/test/test_trixicuda.jl +++ b/test/test_macros.jl @@ -1,21 +1,41 @@ # Create some macros to simplify the testing process. - -# Load the required packages, macros, and structs using Trixi, TrixiCUDA +using CUDA using Test: @test, @testset # Macro to test the type Float64 or Float32 ? -# Macro to test the approximate equality of arrays from GPU and CPU, while -# also handling the cases related to NaNs. +# Macro to test the exact equality of arrays from GPU and CPU +macro test_equal(expr) + # Parse the expression and check that it is of the form + # @test_equal (array1, array2) + if expr.head != :tuple || length(expr.args) != 2 + error("Usage: @test_equal (gpu, cpu)") + end + + local gpu = esc(expr.args[1]) + local cpu = esc(expr.args[2]) + + quote + # Convert to arrays to avoid using CUDA.@allowscalar + # to access the elements of some arrays + local gpu_arr = Array($gpu) + local cpu_arr = Array($cpu) + + @test gpu_arr == cpu_arr + end +end + +# Macro to test the approximate equality of arrays from GPU and CPU with NaNs macro test_approx(expr) - # Parse the expression and check that it is of the form array1 ≈ array2 - if expr.head != :call || expr.args[1] != :≈ - error("Usage: @test_approx array1 ≈ array2") + # Parse the expression and check that it is of the form + # @test_approx (array1, array2) + if expr.head != :tuple || length(expr.args) != 2 + error("Usage: @test_approx (gpu, cpu)") end - local gpu = esc(expr.args[2]) - local cpu = esc(expr.args[3]) + local gpu = esc(expr.args[1]) + local cpu = esc(expr.args[2]) quote # Convert to arrays to avoid using CUDA.@allowscalar diff --git a/test/test_mhd_alfven_wave_mortar.jl b/test/test_mhd_alfven_wave_mortar.jl deleted 
file mode 100644 index c53ffd6..0000000 --- a/test/test_mhd_alfven_wave_mortar.jl +++ /dev/null @@ -1,263 +0,0 @@ -module TestMHDMortar - -####################################################################### Tags -# Kernels: -# - `cuda_prolong2mortars!` -# - `cuda_mortar_flux!` -# Conditions: -# - `nonconservative_terms::True` -# - `periodicity = true` 1D, 2D, 3D -# - `volume_integral::VolumeIntegralFluxDifferencing` -# - `cache_mortars::True` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Ideal GLM MHD" begin - @testset "Ideal GLM MHD 2D" begin - equations = IdealGlmMhdEquations2D(5 / 3) - - initial_condition = initial_condition_convergence_test - - volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) - solver = DGSEM(polydeg = 3, - surface_flux = (flux_hlle, - flux_nonconservative_powell), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (0.0, 0.0) - coordinates_max = (sqrt(2.0), sqrt(2.0)) - refinement_patches = ((type = "box", coordinates_min = 0.25 .* coordinates_max, - coordinates_max = 0.75 .* coordinates_max),) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - refinement_patches = refinement_patches, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 2.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = 
Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ 
cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Ideal GLM MHD 3D" begin - equations = IdealGlmMhdEquations3D(5 / 3) - - initial_condition = initial_condition_convergence_test - - volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) - solver = DGSEM(polydeg = 3, - surface_flux = (flux_hlle, - flux_nonconservative_powell), - volume_integral = 
VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-1.0, -1.0, -1.0) - coordinates_max = (1.0, 1.0, 1.0) - refinement_patches = ((type = "box", coordinates_min = (-0.5, -0.5, -0.5), - coordinates_max = (0.5, 0.5, 0.5)),) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 2, - refinement_patches = refinement_patches, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, 
Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - 
@test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_mhd_ec.jl b/test/test_mhd_ec.jl deleted file mode 100644 index 7146a0f..0000000 --- a/test/test_mhd_ec.jl +++ /dev/null @@ -1,352 +0,0 @@ -module TestMHDFluxDifferencing - -####################################################################### Tags -# Kernels: -# -`cuda_volume_integral!` -# Conditions: -# - `nonconservative_terms::True` -# - `volume_integral::VolumeIntegralFluxDifferencing` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Ideal GLM MHD" begin - @testset "Ideal GLM MHD 1D" begin - gamma = 2 - equations = IdealGlmMhdEquations1D(gamma) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = flux_hindenlang_gassner - solver = DGSEM(polydeg = 3, surface_flux = flux_hindenlang_gassner, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = 0.0 - coordinates_max = 1.0 - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, 
equations, initial_condition, solver) - - tspan = (0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, 
boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Ideal GLM MHD 2D" begin - equations = IdealGlmMhdEquations2D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) - solver = DGSEM(polydeg = 3, - surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-2.0, -2.0) - coordinates_max = (2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = 
(0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - 
cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - 
Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Ideal GLM MHD 3D" begin - equations = IdealGlmMhdEquations3D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) - solver = DGSEM(polydeg = 3, - surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-2.0, -2.0, -2.0) - coordinates_max = (2.0, 2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 2, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 0.4) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx 
du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - 
TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_mhd_shockcapturing.jl b/test/test_mhd_shockcapturing.jl deleted file mode 100644 index cc6bd50..0000000 --- a/test/test_mhd_shockcapturing.jl +++ /dev/null @@ -1,277 +0,0 @@ -module TestMHDShock - -# Combined with `ShallowWaterEquations1D` to complete the tests for -# shock capturing - -####################################################################### Tags -# Kernels: -# -`cuda_volume_integral!` -# Conditions: -# - `nonconservative_terms::True` -# - `volume_integral::VolumeIntegralShockCapturingHG` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Ideal GLM MHD" begin - @testset "Ideal GLM MHD 2D" begin - equations = 
IdealGlmMhdEquations2D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) - volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) - polydeg = 4 - basis = LobattoLegendreBasis(polydeg) - indicator_sc = IndicatorHennemannGassner(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = density_pressure) - volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - - solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, - volume_integral = volume_integral) - - coordinates_min = (-2.0, -2.0) - coordinates_max = (2.0, 2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, 
Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, 
TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Ideal GLM MHD 3D" begin - equations = IdealGlmMhdEquations3D(1.4) - - initial_condition = initial_condition_weak_blast_wave - - surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) - volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) - polydeg = 4 - basis = LobattoLegendreBasis(polydeg) - indicator_sc = IndicatorHennemannGassner(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = density_pressure) - volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - - solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, - volume_integral = volume_integral) - - coordinates_min = (-2.0, -2.0, -2.0) - coordinates_max = (2.0, 2.0, 2.0) - mesh = TreeMesh(coordinates_min, 
coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ 
cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left - @test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right - @test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left - @test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - 
@test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_script.jl b/test/test_script.jl index 8fed286..5ce6563 100644 --- a/test/test_script.jl +++ b/test/test_script.jl @@ -1,44 +1,38 @@ include("test_trixicuda.jl") -equations = IdealGlmMhdEquations3D(1.4) - -initial_condition = initial_condition_weak_blast_wave - -surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) -polydeg = 4 -basis = LobattoLegendreBasis(polydeg) -indicator_sc = IndicatorHennemannGassner(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = density_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, - volume_integral = volume_integral) - -coordinates_min = (-2.0, -2.0, -2.0) -coordinates_max = (2.0, 2.0, 2.0) +equations = HyperbolicDiffusionEquations1D() + +initial_condition = initial_condition_poisson_nonperiodic + +boundary_conditions = boundary_condition_poisson_nonperiodic + +solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) + +coordinates_min = 0.0 +coordinates_max = 1.0 mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 3, - n_cells_max = 10_000) + n_cells_max = 30_000, + periodicity = false) -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) 
-semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions, + source_terms = source_terms_poisson_nonperiodic) +semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions, + source_terms = source_terms_poisson_nonperiodic) -tspan = (0.0, 1.0) +tspan = (0.0, 5.0) # Get CPU data (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi # Get GPU data mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache -equations_gpu, source_terms_gpu = semi_gpu.equations, semi_gpu.source_terms -initial_condition_gpu, boundary_conditions_gpu = semi_gpu.initial_condition, - semi_gpu.boundary_conditions +equations_gpu = semi_gpu.equations +initial_condition_gpu = semi_gpu.initial_condition +boundary_conditions_gpu = semi_gpu.boundary_conditions +source_terms_gpu = semi_gpu.source_terms # Set initial time t = t_gpu = 0.0 @@ -91,25 +85,6 @@ Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values -# Test `cuda_prolong2mortars!` -TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) -Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) -@test_approx cache_gpu.mortars.u_upper_left ≈ cache.mortars.u_upper_left -@test_approx cache_gpu.mortars.u_upper_right ≈ cache.mortars.u_upper_right -@test_approx cache_gpu.mortars.u_lower_left ≈ cache.mortars.u_lower_left -@test_approx cache_gpu.mortars.u_lower_right ≈ cache.mortars.u_lower_right - -# Test `cuda_mortar_flux!` -TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - 
Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) -Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) -@test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - # Test `cuda_surface_integral!` TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) diff --git a/test/test_shallowwater_dirichlet.jl b/test/test_shallowwater_dirichlet.jl deleted file mode 100644 index 399b359..0000000 --- a/test/test_shallowwater_dirichlet.jl +++ /dev/null @@ -1,244 +0,0 @@ -module TestShallowWaterBoundary - -####################################################################### Tags -# Kernels: -# -`cuda_prolong2boundaries!` -# - `cuda_boundary_flux!` -# Conditions: -# - `nonconservative_terms::True` -# - `periodicity = false` 1D, 2D, 3D - `BoundaryConditionDirichlet` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Shallow Water" begin - @testset "Shallow Water 1D" begin - equations = ShallowWaterEquations1D(gravity_constant = 9.81, H0 = 3.0) - - initial_condition = initial_condition_convergence_test - - boundary_condition = BoundaryConditionDirichlet(initial_condition) - - volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - solver = DGSEM(polydeg = 4, - surface_flux = (flux_hll, - flux_nonconservative_fjordholm_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = 0.0 - coordinates_max = sqrt(2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = false) - - semi = SemidiscretizationHyperbolic(mesh, 
equations, initial_condition, solver, - boundary_conditions = boundary_condition) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - 
TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Shallow Water 2D" begin - equations = ShallowWaterEquations2D(gravity_constant = 9.81) - - initial_condition = initial_condition_convergence_test - - boundary_condition = BoundaryConditionDirichlet(initial_condition) - - volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_fjordholm_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (0.0, 0.0) - coordinates_max = (sqrt(2.0), sqrt(2.0)) - mesh = TreeMesh(coordinates_min, 
coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = false) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_condition, - source_terms = source_terms_convergence_test) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), 
equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - 
TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_shallowwater_ec.jl b/test/test_shallowwater_ec.jl deleted file mode 100644 index b05147a..0000000 --- a/test/test_shallowwater_ec.jl +++ /dev/null @@ -1,298 +0,0 @@ -module TestShallowWaterFluxDifferencing - -####################################################################### Tags -# Kernels: -# -`cuda_volume_integral!` -# Conditions: -# - `nonconservative_terms::True` -# - `volume_integral::VolumeIntegralFluxDifferencing` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Shallow Water" begin - @testset "Shallow Water 1D" begin - equations = ShallowWaterEquations1D(gravity_constant = 9.81) - - function initial_condition_ec_discontinuous_bottom(x, t, equations::ShallowWaterEquations1D) - # Set the background values - H = 4.25 - v = 0.0 - b = sin(x[1]) # arbitrary continuous function - - # Setup the discontinuous water height and velocity - if x[1] >= 0.125 && x[1] <= 0.25 - H = 5.0 - v = 0.1882 - end - - # Setup a discontinuous bottom topography - if x[1] >= -0.25 && x[1] <= -0.125 - b = 2.0 + 0.5 * sin(2.0 * pi * x[1]) - end - - return prim2cons(SVector(H, v, b), equations) - end - - initial_condition = initial_condition_ec_discontinuous_bottom - - volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - solver = DGSEM(polydeg = 4, - surface_flux = (flux_fjordholm_etal, flux_nonconservative_fjordholm_etal), - 
volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = -1.0 - coordinates_max = 1.0 - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 2.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - 
Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Shallow Water 2D" begin - equations = ShallowWaterEquations2D(gravity_constant = 9.81) - - initial_condition = initial_condition_weak_blast_wave - - volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - solver = DGSEM(polydeg = 4, - surface_flux = (flux_fjordholm_etal, flux_nonconservative_fjordholm_etal), - volume_integral = 
VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (-1.0, -1.0) - coordinates_max = (1.0, 1.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 2, - n_cells_max = 10_000) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - - tspan = (0.0, 2.0) - ode = semidiscretize(semi, tspan) - - function initial_condition_ec_discontinuous_bottom(x, t, element_id, - equations::ShallowWaterEquations2D) - # Set up polar coordinates - inicenter = SVector(0.7, 0.7) - x_norm = x[1] - inicenter[1] - y_norm = x[2] - inicenter[2] - r = sqrt(x_norm^2 + y_norm^2) - phi = atan(y_norm, x_norm) - sin_phi, cos_phi = sincos(phi) - - # Set the background values - H = 4.25 - v1 = 0.0 - v2 = 0.0 - b = 0.0 - - # Setup the discontinuous water height and velocities - if element_id == 10 - H = 5.0 - v1 = 0.1882 * cos_phi - v2 = 0.1882 * sin_phi - end - - # Setup a discontinuous bottom topography using the element id number - if element_id == 7 - b = 2.0 + 0.5 * sin(2.0 * pi * x[1]) + 0.5 * cos(2.0 * pi * x[2]) - end - - return prim2cons(SVector(H, v1, v2, b), equations) - end - - # Point to the data we want to augment - u = Trixi.wrap_array(ode.u0, semi) - # Reset the initial condition - for element in eachelement(semi.solver, semi.cache) - for j in eachnode(semi.solver), i in eachnode(semi.solver) - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, - semi.solver, i, j, element) - u_node = initial_condition_ec_discontinuous_bottom(x_node, first(tspan), element, - equations) - Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element) - end - end - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - 
deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - # ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - # u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, 
cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/test_shallowwater_shockcapturing.jl b/test/test_shallowwater_shockcapturing.jl deleted file mode 100644 index 29c4fe3..0000000 --- 
a/test/test_shallowwater_shockcapturing.jl +++ /dev/null @@ -1,123 +0,0 @@ -module TestShallowWaterShock - -# Combined with `IdealGlmMhdEquations2D` and `IdealGlmMhdEquations3D` -# to complete the tests for shock capturing - -####################################################################### Tags -# Kernels: -# -`cuda_volume_integral!` -# Conditions: -# - `nonconservative_terms::True` -# - `volume_integral::VolumeIntegralShockCapturingHG` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Shallow Water" begin - @testset "Shallow Water 1D" begin - equations = ShallowWaterEquations1D(gravity_constant = 9.812, H0 = 1.75) - - function initial_condition_stone_throw_discontinuous_bottom(x, t, - equations::ShallowWaterEquations1D) - - # Calculate primitive variables - - # Flat lake - H = equations.H0 - - # Discontinuous velocity - v = 0.0 - if x[1] >= -0.75 && x[1] <= 0.0 - v = -1.0 - elseif x[1] >= 0.0 && x[1] <= 0.75 - v = 1.0 - end - - b = (1.5 / exp(0.5 * ((x[1] - 1.0)^2)) + - 0.75 / exp(0.5 * ((x[1] + 1.0)^2))) - - # Force a discontinuous bottom topography - if x[1] >= -1.5 && x[1] <= 0.0 - b = 0.5 - end - - return prim2cons(SVector(H, v, b), equations) - end - - initial_condition = initial_condition_stone_throw_discontinuous_bottom - - boundary_condition = boundary_condition_slip_wall - - volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - surface_flux = (FluxHydrostaticReconstruction(flux_lax_friedrichs, - hydrostatic_reconstruction_audusse_etal), - flux_nonconservative_audusse_etal) - basis = LobattoLegendreBasis(4) - - indicator_sc = IndicatorHennemannGassner(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) - volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - - 
solver = DGSEM(basis, surface_flux, volume_integral) - - coordinates_min = -3.0 - coordinates_max = 3.0 - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = false) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_condition) - - tspan = (0.0, 3.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Wait for fix of boundary flux dispatches - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Shallow Water 2D" begin end -end - -end # module diff --git a/test/test_shallowwater_source_terms.jl b/test/test_shallowwater_source_terms.jl deleted file mode 100644 index 0a286c0..0000000 --- a/test/test_shallowwater_source_terms.jl +++ /dev/null @@ -1,238 +0,0 @@ -module TestShallowWaterSourceTerms - 
-####################################################################### Tags -# Kernels: -# -`cuda_sources!` -# Conditions: -# - `nonconservative_terms::True` -# - `volume_integral::VolumeIntegralFluxDifferencing` -# - `source_terms` -####################################################################### - -include("test_trixicuda.jl") - -# Test precision of the semidiscretization process -@testset "Test Shallow Water" begin - @testset "Shallow Water 1D" begin - equations = ShallowWaterEquations1D(gravity_constant = 9.81) - - initial_condition = initial_condition_convergence_test - - volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_fjordholm_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = 0.0 - coordinates_max = sqrt(2.0) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = true) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - 
TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test 
`cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end - - @testset "Shallow Water 2D" begin - equations = ShallowWaterEquations2D(gravity_constant = 9.81) - - initial_condition = initial_condition_convergence_test # MMS EOC test - - volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - solver = DGSEM(polydeg = 3, - surface_flux = (flux_lax_friedrichs, flux_nonconservative_fjordholm_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - - coordinates_min = (0.0, 0.0) - coordinates_max = (sqrt(2.0), sqrt(2.0)) - mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = true) - - semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - - tspan = (0.0, 1.0) - - # Get CPU data - (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi - - # Get GPU data - equations_gpu = deepcopy(equations) - mesh_gpu, solver_gpu, cache_gpu = deepcopy(mesh), deepcopy(solver), deepcopy(cache) - boundary_conditions_gpu, source_terms_gpu = deepcopy(boundary_conditions), - deepcopy(source_terms) - - # Set initial time - t = t_gpu = 0.0 - - # Get initial data - ode = semidiscretize(semi, tspan) - u_ode = copy(ode.u0) - du_ode = similar(u_ode) - u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) - du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) - - # Copy data to device - du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) - # Reset data 
on host - Trixi.reset_du!(du, solver, cache) - - # Test `cuda_volume_integral!` - TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, solver_gpu, - cache_gpu) - Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), - equations, solver.volume_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_prolong2interfaces!` - TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.interfaces.u ≈ cache.interfaces.u - - # Test `cuda_interface_flux!` - TrixiCUDA.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2boundaries!` - TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, - cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) - @test_approx cache_gpu.boundaries.u ≈ cache.boundaries.u - - # Test `cuda_boundary_flux!` - TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, - Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, - solver.surface_integral, solver) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_prolong2mortars!` - TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - solver_gpu, cache_gpu) - Trixi.prolong2mortars!(cache, 
u, mesh, equations, - solver.mortar, solver.surface_integral, solver) - @test_approx cache_gpu.mortars.u_upper ≈ cache.mortars.u_upper - @test_approx cache_gpu.mortars.u_lower ≈ cache.mortars.u_lower - - # Test `cuda_mortar_flux!` - TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), - Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, - solver_gpu, cache_gpu) - Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, - Trixi.have_nonconservative_terms(equations), equations, - solver.mortar, solver.surface_integral, solver, cache) - @test_approx cache_gpu.elements.surface_flux_values ≈ cache.elements.surface_flux_values - - # Test `cuda_surface_integral!` - TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_jacobian!` - TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Test `cuda_sources!` - TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) - Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - @test_approx du_gpu ≈ du - - # Copy data back to host - du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) - end -end - -end # module diff --git a/test/tree_dgsem_1d/advection_amr.jl b/test/tree_dgsem_1d/advection_amr.jl new file mode 100644 index 0000000..95060d2 --- /dev/null +++ b/test/tree_dgsem_1d/advection_amr.jl @@ -0,0 +1,133 @@ +module TestAdvectionAMR1D + +include("../test_macros.jl") + +@testset "Advection AMR 1D" begin + advection_velocity = 1.0 + equations = LinearScalarAdvectionEquation1D(advection_velocity) + + initial_condition = initial_condition_gauss + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (-5.0,) + 
coordinates_max = (5.0,) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 10.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface 
Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to 
CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/advection_amr_nonperiodic.jl b/test/tree_dgsem_1d/advection_amr_nonperiodic.jl new file mode 100644 index 0000000..8981c7d --- /dev/null +++ b/test/tree_dgsem_1d/advection_amr_nonperiodic.jl @@ -0,0 +1,141 @@ +module TestAdvectionAMRNonperiodic1D + +include("../test_macros.jl") + +@testset "Advection AMR Nonperiodic 1D" begin + advection_velocity = 1.0 + equations = LinearScalarAdvectionEquation1D(advection_velocity) + + initial_condition = initial_condition_gauss + boundary_conditions = BoundaryConditionDirichlet(initial_condition) + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (0.0,) + coordinates_max = (5.0,) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000, + periodicity = false) + + semi = SemidiscretizationHyperbolic(mesh, equations, + initial_condition, + solver, + boundary_conditions = boundary_conditions) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, + initial_condition, + solver, + boundary_conditions = boundary_conditions) + + tspan = (0.0, 5.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset 
"Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + 
solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/advection_basic.jl b/test/tree_dgsem_1d/advection_basic.jl new file mode 100644 index 0000000..2d84d7c --- /dev/null +++ b/test/tree_dgsem_1d/advection_basic.jl @@ -0,0 +1,134 @@ +module TestAdvectionBasic1D + +include("../test_macros.jl") + +@testset "Advection Basic 1D" begin + advection_velocity = 1.0 + equations = LinearScalarAdvectionEquation1D(advection_velocity) + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = -1.0 + coordinates_max = 1.0 + + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition_convergence_test, + solver) + + tspan = (0.0, 1.0) + + ode = 
semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx 
(cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/advection_extended.jl b/test/tree_dgsem_1d/advection_extended.jl new file mode 100644 index 0000000..f84921d --- /dev/null +++ 
b/test/tree_dgsem_1d/advection_extended.jl @@ -0,0 +1,139 @@ +module TestAdvectionExtended1D + +include("../test_macros.jl") + +@testset "Advection Extended 1D" begin + advection_velocity = 1.0 + equations = LinearScalarAdvectionEquation1D(advection_velocity) + + initial_condition = initial_condition_convergence_test + + boundary_conditions = boundary_condition_periodic + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = -1.0 + coordinates_max = 1.0 + + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000, + periodicity = true) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + 
equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, 
solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/burgers_basic.jl b/test/tree_dgsem_1d/burgers_basic.jl new file mode 100644 index 0000000..d84087b --- /dev/null +++ b/test/tree_dgsem_1d/burgers_basic.jl @@ -0,0 +1,134 @@ +module TestBurgersBasic1D + +include("../test_macros.jl") + +@testset "Burgers Basic 1D" begin + equations = InviscidBurgersEquation1D() + + initial_condition = initial_condition_convergence_test + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = 0.0 + coordinates_max = 1.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, 
equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, 
solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/burgers_linear_stability.jl b/test/tree_dgsem_1d/burgers_linear_stability.jl new file mode 100644 index 0000000..8267dc1 --- /dev/null +++ b/test/tree_dgsem_1d/burgers_linear_stability.jl @@ -0,0 +1,139 @@ +module TestBurgersLinearStability1D + +include("../test_macros.jl") + +@testset "Burgers Linear Stability 1D" begin + equations = InviscidBurgersEquation1D() + + function initial_condition_linear_stability(x, t, equation::InviscidBurgersEquation1D) + k = 
1 + 2 + sinpi(k * (x[1] - 0.7)) |> SVector + end + + volume_flux = flux_ec + solver = DGSEM(polydeg = 3, surface_flux = flux_ec, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = -1.0 + coordinates_max = 1.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_linear_stability, + solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition_linear_stability, + solver) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + 
TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + 
@testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/burgers_rarefraction.jl b/test/tree_dgsem_1d/burgers_rarefraction.jl new file mode 100644 index 0000000..5aa7aa3 --- /dev/null +++ b/test/tree_dgsem_1d/burgers_rarefraction.jl @@ -0,0 +1,172 @@ +module TestBurgersRarefaction1D + +include("../test_macros.jl") + +@testset "Burgers Rarefaction 1D" begin + equations = InviscidBurgersEquation1D() + + basis = LobattoLegendreBasis(3) + # Use shock capturing techniques to suppress oscillations at discontinuities + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 1.0, + alpha_min = 0.001, + alpha_smooth = true, + variable = first) + + volume_flux = flux_ec + surface_flux = flux_lax_friedrichs + + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + + solver = DGSEM(basis, surface_flux, volume_integral) + + coordinate_min = 0.0 + coordinate_max = 1.0 + + mesh = TreeMesh(coordinate_min, coordinate_max, + initial_refinement_level = 6, + n_cells_max = 10_000, + periodicity = false) + + # Discontinuous initial condition (Riemann Problem) leading to a rarefaction fan. + function initial_condition_rarefaction(x, t, equation::InviscidBurgersEquation1D) + scalar = x[1] < 0.5 ? 
0.5 : 1.5 + + return SVector(scalar) + end + + boundary_condition_inflow = BoundaryConditionDirichlet(initial_condition_rarefaction) + + function boundary_condition_outflow(u_inner, orientation, normal_direction, x, t, + surface_flux_function, + equations::InviscidBurgersEquation1D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, normal_direction, equations) + + return flux + end + + boundary_conditions = (x_neg = boundary_condition_inflow, + x_pos = boundary_condition_outflow) + + initial_condition = initial_condition_rarefaction + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + + tspan = (0.0, 0.2) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, 
solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx 
(du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/burgers_shock.jl b/test/tree_dgsem_1d/burgers_shock.jl new file mode 100644 index 0000000..554c7f4 --- /dev/null +++ b/test/tree_dgsem_1d/burgers_shock.jl @@ -0,0 +1,173 @@ +module TestBurgersShock1D + +include("../test_macros.jl") + +@testset "Burgers Shock 1D" begin + equations = InviscidBurgersEquation1D() + + basis = LobattoLegendreBasis(3) + # Use shock capturing techniques to suppress oscillations at discontinuities + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 1.0, + alpha_min = 0.001, + alpha_smooth = true, + variable = first) + + volume_flux = flux_ec + surface_flux = flux_lax_friedrichs + + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = surface_flux, + volume_flux_fv = surface_flux) + + solver = DGSEM(basis, surface_flux, volume_integral) + + coordinate_min = 0.0 + coordinate_max = 1.0 + + # Make sure to turn periodicity explicitly off as special boundary conditions are specified + mesh = TreeMesh(coordinate_min, coordinate_max, + initial_refinement_level = 6, + n_cells_max = 10_000, + periodicity = false) + + # Discontinuous initial condition (Riemann Problem) leading to a shock to test e.g. correct shock speed. 
+ function initial_condition_shock(x, t, equation::InviscidBurgersEquation1D) + scalar = x[1] < 0.5 ? 1.5 : 0.5 + + return SVector(scalar) + end + + boundary_condition_inflow = BoundaryConditionDirichlet(initial_condition_shock) + + function boundary_condition_outflow(u_inner, orientation, normal_direction, x, t, + surface_flux_function, + equations::InviscidBurgersEquation1D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, normal_direction, equations) + + return flux + end + + boundary_conditions = (x_neg = boundary_condition_inflow, + x_pos = boundary_condition_outflow) + + initial_condition = initial_condition_shock + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + + tspan = (0.0, 0.2) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + 
Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + 
Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/euler_blast_wave.jl b/test/tree_dgsem_1d/euler_blast_wave.jl new file mode 100644 index 0000000..a2a209a --- /dev/null +++ b/test/tree_dgsem_1d/euler_blast_wave.jl @@ -0,0 +1,157 @@ +module TestEulerBlastWave1D + +include("../test_macros.jl") + +@testset "Euler Blast Wave 1D" begin + equations = CompressibleEulerEquations1D(1.4) + + function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations1D) + # Set up polar coordinates + inicenter = SVector(0.0) + x_norm = x[1] - inicenter[1] + r = abs(x_norm) + cos_phi = x_norm > 0 ? one(x_norm) : -one(x_norm) + + # Calculate primitive variables + rho = r > 0.5 ? 1.0 : 1.1691 + v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi + p = r > 0.5 ? 
1.0E-3 : 1.245 + + return prim2cons(SVector(rho, v1, p), equations) + end + initial_condition = initial_condition_blast_wave + + surface_flux = flux_lax_friedrichs + volume_flux = flux_ranocha + basis = LobattoLegendreBasis(3) + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + solver = DGSEM(basis, surface_flux, volume_integral) + + coordinates_min = (-2.0,) + coordinates_max = (2.0,) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 6, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 12.5) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + 
Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + 
Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/euler_ec.jl b/test/tree_dgsem_1d/euler_ec.jl new file mode 100644 index 0000000..b8c6890 --- /dev/null +++ b/test/tree_dgsem_1d/euler_ec.jl @@ -0,0 +1,134 @@ +module TestEulerEC1D + +include("../test_macros.jl") + +@testset "Euler EC 1D" begin + equations = CompressibleEulerEquations1D(1.4) + + initial_condition = initial_condition_weak_blast_wave + + volume_flux = flux_ranocha + solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-2.0,) + coordinates_max = (2.0,) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = 
Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + 
Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/euler_shock.jl b/test/tree_dgsem_1d/euler_shock.jl new file mode 100644 index 0000000..1b12981 --- /dev/null +++ b/test/tree_dgsem_1d/euler_shock.jl @@ -0,0 +1,143 @@ +module TestEulerShock1D + +include("../test_macros.jl") + +@testset "Euler Shock 1D" begin + equations = CompressibleEulerEquations1D(1.4) + + initial_condition = initial_condition_weak_blast_wave + + surface_flux = flux_lax_friedrichs + 
volume_flux = flux_shima_etal + basis = LobattoLegendreBasis(3) + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + solver = DGSEM(basis, surface_flux, volume_integral) + + coordinates_min = -2.0 + coordinates_max = 2.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + 
equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, 
mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/euler_source_terms.jl b/test/tree_dgsem_1d/euler_source_terms.jl new file mode 100644 index 0000000..25100a8 --- /dev/null +++ b/test/tree_dgsem_1d/euler_source_terms.jl @@ -0,0 +1,134 @@ +module TestEulerSourceTerms1D + +include("../test_macros.jl") + +@testset "Euler Source Terms 1D" begin + equations = CompressibleEulerEquations1D(1.4) + + initial_condition = initial_condition_convergence_test + + solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) + + coordinates_min = 0.0 + coordinates_max = 2.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = 
semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + 
@testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/euler_source_terms_nonperiodic.jl b/test/tree_dgsem_1d/euler_source_terms_nonperiodic.jl new file mode 100644 index 0000000..c984cb9 --- /dev/null +++ b/test/tree_dgsem_1d/euler_source_terms_nonperiodic.jl @@ -0,0 +1,141 @@ +module TestEulerSourceTermsNonperiodic1D + +include("../test_macros.jl") + +@testset "Euler Source Terms Nonperiodic 1D" begin + equations = CompressibleEulerEquations1D(1.4) + + initial_condition = initial_condition_convergence_test + + boundary_condition = BoundaryConditionDirichlet(initial_condition) + boundary_conditions = (x_neg = boundary_condition, + 
x_pos = boundary_condition) + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (0.0,) + coordinates_max = (2.0,) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000, + periodicity = false) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test, + boundary_conditions = boundary_conditions) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test, + boundary_conditions = boundary_conditions) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal 
(u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + 
@test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/eulermulti_ec.jl b/test/tree_dgsem_1d/eulermulti_ec.jl new file mode 100644 index 0000000..bbbb556 --- /dev/null +++ b/test/tree_dgsem_1d/eulermulti_ec.jl @@ -0,0 +1,135 @@ +module TestEulerMultiEC1D + +include("../test_macros.jl") + +@testset "Euler Multi EC 1D" begin + equations = CompressibleEulerMulticomponentEquations1D(gammas = (1.4, 1.4, 1.4), + gas_constants = (0.4, 0.4, 0.4)) + + initial_condition = initial_condition_weak_blast_wave + + volume_flux = flux_ranocha + solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-2.0,) + coordinates_max = (2.0,) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + 
initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + 
TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/eulermulti_es.jl b/test/tree_dgsem_1d/eulermulti_es.jl new file mode 100644 index 0000000..ed11a41 --- /dev/null +++ b/test/tree_dgsem_1d/eulermulti_es.jl @@ -0,0 +1,135 @@ +module TestEulerMultiES1D + +include("../test_macros.jl") + +@testset "Euler Multi ES 1D" begin + equations = CompressibleEulerMulticomponentEquations1D(gammas = (1.4, 1.4), + gas_constants = (0.4, 0.4)) + + initial_condition = initial_condition_weak_blast_wave + + volume_flux = flux_ranocha + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = 
(-2.0,) + coordinates_max = (2.0,) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset 
"Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset 
"Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/eulerquasi_ec.jl b/test/tree_dgsem_1d/eulerquasi_ec.jl new file mode 100644 index 0000000..dddaa52 --- /dev/null +++ b/test/tree_dgsem_1d/eulerquasi_ec.jl @@ -0,0 +1,144 @@ +module TestEulerQuasiEC1D + +include("../test_macros.jl") + +@testset "Euler Quasi EC 1D" begin + equations = CompressibleEulerEquationsQuasi1D(1.4) + + function initial_condition_ec(x, t, equations::CompressibleEulerEquationsQuasi1D) + v1 = 0.1 + rho = 2.0 + 0.1 * x[1] + p = 3.0 + a = 2.0 + x[1] + + return prim2cons(SVector(rho, v1, p, a), equations) + end + + initial_condition = initial_condition_ec + + surface_flux = (flux_chan_etal, flux_nonconservative_chan_etal) + volume_flux = surface_flux + solver = DGSEM(polydeg = 4, surface_flux = surface_flux, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-1.0,) + coordinates_max = (1.0,) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 6, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = 
semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, 
solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/eulerquasi_source_terms.jl b/test/tree_dgsem_1d/eulerquasi_source_terms.jl new file mode 100644 index 0000000..5ed078f --- /dev/null +++ b/test/tree_dgsem_1d/eulerquasi_source_terms.jl @@ -0,0 +1,137 @@ +module TestEulerQuasiSourceTerms1D + +include("../test_macros.jl") + +@testset "Euler Quasi Source Terms 1D" begin + equations = CompressibleEulerEquationsQuasi1D(1.4) + + initial_condition = initial_condition_convergence_test + + surface_flux = (flux_chan_etal, flux_nonconservative_chan_etal) + volume_flux = surface_flux + solver = DGSEM(polydeg = 4, surface_flux = surface_flux, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = -1.0 + coordinates_max = 1.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + 
initial_refinement_level = 4, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + 
@testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + 
@testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/hypdiff_harmonic_nonperiodic.jl b/test/tree_dgsem_1d/hypdiff_harmonic_nonperiodic.jl new file mode 100644 index 0000000..f2e4fa2 --- /dev/null +++ b/test/tree_dgsem_1d/hypdiff_harmonic_nonperiodic.jl @@ -0,0 +1,153 @@ +module TestHypdiffHarmonicNonperiodic1D + +include("../test_macros.jl") + +@testset "Hypdiff Harmonic Nonperiodic 1D" begin + equations = HyperbolicDiffusionEquations1D(nu = 1.25) + + function initial_condition_harmonic_nonperiodic(x, t, + equations::HyperbolicDiffusionEquations1D) + # elliptic equation: -νΔϕ = f + if t == 0.0 + phi = 5.0 + q1 = 0.0 + else + A = 3 + B = exp(1) + phi = A + B * x[1] + q1 = B + end + return SVector(phi, q1) + end + initial_condition = initial_condition_harmonic_nonperiodic + + boundary_conditions = BoundaryConditionDirichlet(initial_condition) + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = -1.0 + coordinates_max = 2.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + n_cells_max = 30_000, + periodicity = false) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions, + source_terms = source_terms_harmonic) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions, + source_terms = source_terms_harmonic) + + tspan = (0.0, 30.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + 
equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx 
(cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/hypdiff_nonperiodic.jl b/test/tree_dgsem_1d/hypdiff_nonperiodic.jl new file mode 100644 index 0000000..c13e30e --- /dev/null +++ b/test/tree_dgsem_1d/hypdiff_nonperiodic.jl @@ -0,0 +1,139 @@ +module TestHypdiffNonperiodic1D + +include("../test_macros.jl") + +@testset "Hypdiff Nonperiodic 1D" begin + equations = HyperbolicDiffusionEquations1D() + + initial_condition = initial_condition_poisson_nonperiodic + + boundary_conditions = boundary_condition_poisson_nonperiodic + + solver = DGSEM(polydeg = 4, 
surface_flux = flux_lax_friedrichs) + + coordinates_min = 0.0 + coordinates_max = 1.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 30_000, + periodicity = false) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions, + source_terms = source_terms_poisson_nonperiodic) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions, + source_terms = source_terms_poisson_nonperiodic) + + tspan = (0.0, 5.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + 
TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + 
@testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/mhd_alfven_wave.jl b/test/tree_dgsem_1d/mhd_alfven_wave.jl new file mode 100644 index 0000000..7ff92f1 --- /dev/null +++ b/test/tree_dgsem_1d/mhd_alfven_wave.jl @@ -0,0 +1,135 @@ +module TestMHDAlfvenWave1D + +include("../test_macros.jl") + +@testset "MHD Alfven Wave 1D" begin + gamma = 5 / 3 + equations = IdealGlmMhdEquations1D(gamma) + + initial_condition = initial_condition_convergence_test + + volume_flux = flux_hindenlang_gassner + solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = 0.0 + coordinates_max = 1.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + 
source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + 
equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_1d/mhd_ec.jl b/test/tree_dgsem_1d/mhd_ec.jl new file mode 100644 index 0000000..b3e0904 --- /dev/null +++ b/test/tree_dgsem_1d/mhd_ec.jl @@ -0,0 +1,135 @@ +module TestMHDEC1D + +include("../test_macros.jl") + +@testset "MHD EC 1D" begin + gamma = 2 + equations = IdealGlmMhdEquations1D(gamma) + + initial_condition = initial_condition_weak_blast_wave + + volume_flux = flux_hindenlang_gassner + solver = DGSEM(polydeg = 3, surface_flux = flux_hindenlang_gassner, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = 0.0 + coordinates_max = 1.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, 
initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + 
Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # 
module diff --git a/test/tree_dgsem_1d/shallowwater_shock.jl b/test/tree_dgsem_1d/shallowwater_shock.jl new file mode 100644 index 0000000..540212d --- /dev/null +++ b/test/tree_dgsem_1d/shallowwater_shock.jl @@ -0,0 +1,176 @@ +module TestShallowWaterShock1D + +include("../test_macros.jl") + +@testset "Shallow Water Shock 1D" begin + equations = ShallowWaterEquations1D(gravity_constant = 9.812, H0 = 1.75) + + function initial_condition_stone_throw_discontinuous_bottom(x, t, + equations::ShallowWaterEquations1D) + # Flat lake + H = equations.H0 + + # Discontinuous velocity + v = 0.0 + if x[1] >= -0.75 && x[1] <= 0.0 + v = -1.0 + elseif x[1] >= 0.0 && x[1] <= 0.75 + v = 1.0 + end + + b = (1.5 / exp(0.5 * ((x[1] - 1.0)^2)) + + 0.75 / exp(0.5 * ((x[1] + 1.0)^2))) + + # Force a discontinuous bottom topography + if x[1] >= -1.5 && x[1] <= 0.0 + b = 0.5 + end + + return prim2cons(SVector(H, v, b), equations) + end + + initial_condition = initial_condition_stone_throw_discontinuous_bottom + + boundary_condition = boundary_condition_slip_wall + + volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) + surface_flux = (FluxHydrostaticReconstruction(flux_lax_friedrichs, + hydrostatic_reconstruction_audusse_etal), + flux_nonconservative_audusse_etal) + basis = LobattoLegendreBasis(4) + + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = waterheight_pressure) + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + + solver = DGSEM(basis, surface_flux, volume_integral) + + coordinates_min = -3.0 + coordinates_max = 3.0 + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 10_000, + periodicity = false) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_condition) + semi_gpu = 
SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_condition) + + tspan = (0.0, 3.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + 
Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # 
module diff --git a/test/tree_dgsem_1d/tree_dgsem_1d.jl b/test/tree_dgsem_1d/tree_dgsem_1d.jl new file mode 100644 index 0000000..941d89a --- /dev/null +++ b/test/tree_dgsem_1d/tree_dgsem_1d.jl @@ -0,0 +1,22 @@ +include("advection_amr_nonperiodic.jl") +include("advection_amr.jl") +include("advection_basic.jl") +include("advection_extended.jl") +include("burgers_basic.jl") +include("burgers_linear_stability.jl") +include("burgers_rarefraction.jl") +include("burgers_shock.jl") +include("euler_blast_wave.jl") +include("euler_ec.jl") +include("euler_shock.jl") +include("euler_source_terms_nonperiodic.jl") +include("euler_source_terms.jl") +include("eulermulti_ec.jl") +include("eulermulti_es.jl") +include("eulerquasi_ec.jl") +include("eulerquasi_source_terms.jl") +include("hypdiff_harmonic_nonperiodic.jl") +include("hypdiff_nonperiodic.jl") +include("mhd_alfven_wave.jl") +include("mhd_ec.jl") +include("shallowwater_shock.jl") diff --git a/test/tree_dgsem_2d/advection_basic.jl b/test/tree_dgsem_2d/advection_basic.jl new file mode 100644 index 0000000..4911422 --- /dev/null +++ b/test/tree_dgsem_2d/advection_basic.jl @@ -0,0 +1,157 @@ +module TestAdvectionBasic2D + +include("../test_macros.jl") + +@testset "Advection Basic 2D" begin + advection_velocity = (0.2, -0.7) + equations = LinearScalarAdvectionEquation2D(advection_velocity) + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (-1.0, -1.0) + coordinates_max = (1.0, 1.0) + + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition_convergence_test, + solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, 
boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + 
TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + 
TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/advection_mortar.jl b/test/tree_dgsem_2d/advection_mortar.jl new file mode 100644 index 0000000..fc417ed --- /dev/null +++ b/test/tree_dgsem_2d/advection_mortar.jl @@ -0,0 +1,158 @@ +module TestAdvectionMortar2D + +include("../test_macros.jl") + +@testset "Advection Mortar 2D" begin + advection_velocity = (0.2, -0.7) + equations = LinearScalarAdvectionEquation2D(advection_velocity) + + initial_condition = initial_condition_convergence_test + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (-1.0, -1.0) + coordinates_max = (1.0, 1.0) + refinement_patches = ((type = "box", coordinates_min = (0.0, -1.0), + coordinates_max = (1.0, 1.0)),) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + refinement_patches = refinement_patches, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, 
equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, 
solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, 
u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/euler_shock.jl b/test/tree_dgsem_2d/euler_shock.jl new file mode 100644 index 0000000..05c7077 --- /dev/null +++ b/test/tree_dgsem_2d/euler_shock.jl @@ -0,0 +1,166 @@ +module TestEulerShock2D + +include("../test_macros.jl") + +@testset "Euler Shock 2D" begin + equations = CompressibleEulerEquations2D(1.4) + + initial_condition = initial_condition_weak_blast_wave + + surface_flux = flux_lax_friedrichs + volume_flux = flux_shima_etal + basis = LobattoLegendreBasis(3) + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + solver = DGSEM(basis, surface_flux, volume_integral) + + coordinates_min = (-2.0, -2.0) + coordinates_max = (2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + 
# Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, 
solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset 
"Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/euler_source_terms.jl b/test/tree_dgsem_2d/euler_source_terms.jl new file mode 100644 index 0000000..3254ea4 --- /dev/null +++ b/test/tree_dgsem_2d/euler_source_terms.jl @@ -0,0 +1,156 @@ +module TestEulerSourceTerms2D + +include("../test_macros.jl") + +@testset "Euler Source Terms 2D" begin + equations = CompressibleEulerEquations2D(1.4) + + initial_condition = initial_condition_convergence_test + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (0.0, 0.0) + coordinates_max = (2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + 
source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + 
equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy 
to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/euler_source_terms_nonperiodic.jl b/test/tree_dgsem_2d/euler_source_terms_nonperiodic.jl new file mode 100644 index 0000000..ee5610a --- /dev/null +++ b/test/tree_dgsem_2d/euler_source_terms_nonperiodic.jl @@ -0,0 +1,166 @@ +module TestEulerSourceTermsNonperiodic2D + +include("../test_macros.jl") + +@testset "Euler Source Terms Nonperiodic 2D" begin + equations = CompressibleEulerEquations2D(1.4) + + initial_condition = initial_condition_convergence_test + + boundary_condition = BoundaryConditionDirichlet(initial_condition) + boundary_conditions = (x_neg = boundary_condition, + x_pos = boundary_condition, + y_neg = boundary_condition, + y_pos = boundary_condition) + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (0.0, 0.0) + coordinates_max = (2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000, + periodicity = false) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test, + boundary_conditions = boundary_conditions) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test, + boundary_conditions = boundary_conditions) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache 
+ initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + 
TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + 
Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/euler_vortex_mortar.jl b/test/tree_dgsem_2d/euler_vortex_mortar.jl new file mode 100644 index 0000000..d9a7b84 --- /dev/null +++ b/test/tree_dgsem_2d/euler_vortex_mortar.jl @@ -0,0 +1,185 @@ +module TestEulerVortexMortar2D + +include("../test_macros.jl") + +@testset "Euler Vortex Mortar 2D" begin + equations = CompressibleEulerEquations2D(1.4) + + function initial_condition_isentropic_vortex(x, t, equations::CompressibleEulerEquations2D) + inicenter = SVector(0.0, 0.0) + # size and strength of the vortex + iniamplitude = 5.0 + # base flow + rho = 1.0 + v1 = 1.0 + v2 = 1.0 + vel = SVector(v1, v2) + p = 25.0 + rt = p / rho # ideal gas equation + t_loc = 0.0 + cent = inicenter + vel * t_loc # advection of center + + cent = x - cent # distance to center point + + cent = SVector(-cent[2], cent[1]) + r2 = cent[1]^2 + cent[2]^2 + du = iniamplitude / (2 * π) * exp(0.5 * (1 - r2)) # vel. 
perturbation + dtemp = -(equations.gamma - 1) / (2 * equations.gamma * rt) * du^2 # isentropic + rho = rho * (1 + dtemp)^(1 / (equations.gamma - 1)) + vel = vel + du * cent + v1, v2 = vel + p = p * (1 + dtemp)^(equations.gamma / (equations.gamma - 1)) + prim = SVector(rho, v1, v2, p) + return prim2cons(prim, equations) + end + + initial_condition = initial_condition_isentropic_vortex + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (-10.0, -10.0) + coordinates_max = (10.0, 10.0) + refinement_patches = ((type = "box", coordinates_min = (0.0, -10.0), + coordinates_max = (10.0, 10.0)),) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + refinement_patches = refinement_patches, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, 
mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + 
solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/eulermulti_ec.jl b/test/tree_dgsem_2d/eulermulti_ec.jl new file mode 100644 index 0000000..016f279 --- /dev/null +++ b/test/tree_dgsem_2d/eulermulti_ec.jl @@ -0,0 +1,158 @@ +module TestEulerMultiEC2D + 
+include("../test_macros.jl") + +@testset "Euler Multi EC 2D" begin + equations = CompressibleEulerMulticomponentEquations2D(gammas = 1.4, + gas_constants = 0.4) + + initial_condition = initial_condition_weak_blast_wave + + volume_flux = flux_ranocha + solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-2.0, -2.0) + coordinates_max = (2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, 
solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, 
cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/eulermulti_es.jl b/test/tree_dgsem_2d/eulermulti_es.jl new file mode 100644 index 0000000..1c543a0 --- /dev/null +++ b/test/tree_dgsem_2d/eulermulti_es.jl @@ -0,0 +1,158 @@ +module TestEulerMultiES2D + +include("../test_macros.jl") + +@testset "Euler Multi ES 2D" begin + equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4, 1.4, 1.4), + gas_constants = (0.4, 0.4, 0.4, 0.4)) + + initial_condition = 
initial_condition_weak_blast_wave + + volume_flux = flux_ranocha + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-2.0, -2.0) + coordinates_max = (2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, 
mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + 
Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/hypdiff_nonperiodic.jl b/test/tree_dgsem_2d/hypdiff_nonperiodic.jl new file mode 100644 index 0000000..7762604 --- /dev/null +++ b/test/tree_dgsem_2d/hypdiff_nonperiodic.jl @@ -0,0 +1,165 @@ +module TestHypdiffNonperiodic2D + +include("../test_macros.jl") + +@testset "Hypdiff Nonperiodic 2D" begin + equations = HyperbolicDiffusionEquations2D() + + initial_condition = initial_condition_poisson_nonperiodic + # 1 => -x, 2 => +x, 3 => -y, 4 => +y as usual for orientations + boundary_conditions = (x_neg = boundary_condition_poisson_nonperiodic, + x_pos = boundary_condition_poisson_nonperiodic, + y_neg = 
boundary_condition_periodic, + y_pos = boundary_condition_periodic) + + solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) + + coordinates_min = (0.0, 0.0) + coordinates_max = (1.0, 1.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 30_000, + periodicity = (false, true)) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions, + source_terms = source_terms_poisson_nonperiodic) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions, + source_terms = source_terms_poisson_nonperiodic) + + tspan = (0.0, 5.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, 
solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + 
@test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/mhd_alfven_wave.jl b/test/tree_dgsem_2d/mhd_alfven_wave.jl new file mode 100644 index 0000000..a08f87d --- /dev/null +++ b/test/tree_dgsem_2d/mhd_alfven_wave.jl @@ -0,0 +1,159 @@ +module TestMHDAlfvenWave2D + +include("../test_macros.jl") + +@testset "MHD Alfven Wave 2D" begin + gamma = 5 / 3 + equations = IdealGlmMhdEquations2D(gamma) + + initial_condition = initial_condition_convergence_test + + volume_flux = (flux_central, flux_nonconservative_powell) + solver = 
DGSEM(polydeg = 3, + surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (0.0, 0.0) + coordinates_max = (sqrt(2.0), sqrt(2.0)) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + 
Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + 
equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/mhd_alfven_wave_mortar.jl b/test/tree_dgsem_2d/mhd_alfven_wave_mortar.jl new file mode 100644 index 0000000..578e74a --- /dev/null +++ b/test/tree_dgsem_2d/mhd_alfven_wave_mortar.jl @@ -0,0 +1,163 @@ +module TestMHDAlfvenWaveMortar2D + +include("../test_macros.jl") + +@testset "MHD Alfven Wave Mortar 2D" begin + gamma = 5 / 3 + equations = IdealGlmMhdEquations2D(gamma) + + initial_condition = initial_condition_convergence_test + + volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) + solver = DGSEM(polydeg = 3, + surface_flux = (flux_hlle, + flux_nonconservative_powell), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + 
coordinates_min = (0.0, 0.0) + coordinates_max = (sqrt(2.0), sqrt(2.0)) + refinement_patches = ((type = "box", coordinates_min = 0.25 .* coordinates_max, + coordinates_max = 0.75 .* coordinates_max),) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + refinement_patches = refinement_patches, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, 
equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + 
Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/mhd_shock.jl b/test/tree_dgsem_2d/mhd_shock.jl new file mode 100644 index 0000000..523feb6 --- /dev/null +++ b/test/tree_dgsem_2d/mhd_shock.jl @@ -0,0 +1,169 @@ +module TestMHDShock2D + +include("../test_macros.jl") + +@testset "MHD Shock 2D" begin + equations = IdealGlmMhdEquations2D(1.4) + + initial_condition = initial_condition_weak_blast_wave + + surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) + volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) + polydeg = 4 + basis = LobattoLegendreBasis(polydeg) + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 
0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + + solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + + coordinates_min = (-2.0, -2.0) + coordinates_max = (2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + 
@test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar 
Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/shallowwater_ec.jl b/test/tree_dgsem_2d/shallowwater_ec.jl new file mode 100644 index 0000000..8740a81 --- /dev/null +++ b/test/tree_dgsem_2d/shallowwater_ec.jl @@ -0,0 +1,158 @@ +module TestShallowWaterEC2D + +include("../test_macros.jl") + +@testset "Shallow Water EC 2D" begin + equations = ShallowWaterEquations2D(gravity_constant = 9.81) + + initial_condition = initial_condition_weak_blast_wave + + volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) + solver = DGSEM(polydeg = 4, + surface_flux = 
(flux_fjordholm_etal, flux_nonconservative_fjordholm_etal), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-1.0, -1.0) + coordinates_max = (1.0, 1.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 2.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, 
mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + 
Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/shallowwater_source_terms.jl b/test/tree_dgsem_2d/shallowwater_source_terms.jl new file mode 100644 index 0000000..b3167b3 --- /dev/null +++ b/test/tree_dgsem_2d/shallowwater_source_terms.jl @@ -0,0 +1,161 @@ +module TestShallowWaterSourceTerms2D + +include("../test_macros.jl") + +@testset "Shallow Water Source Terms 2D" begin + equations = ShallowWaterEquations2D(gravity_constant = 9.81) + + initial_condition = initial_condition_convergence_test # MMS EOC test + + volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) + solver = DGSEM(polydeg = 3, + surface_flux = (flux_lax_friedrichs, flux_nonconservative_fjordholm_etal), + volume_integral = 
VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (0.0, 0.0) + coordinates_max = (sqrt(2.0), sqrt(2.0)) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 10_000, + periodicity = true) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, 
cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + 
Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/shawllowwater_source_terms_nonperiodic.jl b/test/tree_dgsem_2d/shawllowwater_source_terms_nonperiodic.jl new file mode 100644 index 0000000..ff81a2d --- /dev/null +++ b/test/tree_dgsem_2d/shawllowwater_source_terms_nonperiodic.jl @@ -0,0 +1,165 @@ +module TestShallowWaterSourceTermsNonperiodic2D + +include("../test_macros.jl") + +@testset "Shallow Water Source Terms Nonperiodic 2D" begin + equations = ShallowWaterEquations2D(gravity_constant = 9.81) + + initial_condition = initial_condition_convergence_test + + boundary_condition = BoundaryConditionDirichlet(initial_condition) + + volume_flux = 
(flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) + solver = DGSEM(polydeg = 3, + surface_flux = (flux_lax_friedrichs, flux_nonconservative_fjordholm_etal), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (0.0, 0.0) + coordinates_max = (sqrt(2.0), sqrt(2.0)) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 10_000, + periodicity = false) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_condition, + source_terms = source_terms_convergence_test) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_condition, + source_terms = source_terms_convergence_test) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + 
Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, solver.mortar, + solver.surface_integral, solver) + @test_approx 
(cache_gpu.mortars.u_upper, cache.mortars.u_upper) + @test_approx (cache_gpu.mortars.u_lower, cache.mortars.u_lower) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_2d/tree_dgsem_2d.jl b/test/tree_dgsem_2d/tree_dgsem_2d.jl new file mode 100644 index 0000000..7520f58 --- /dev/null +++ b/test/tree_dgsem_2d/tree_dgsem_2d.jl @@ -0,0 +1,15 @@ +include("advection_basic.jl") +include("advection_mortar.jl") +include("euler_shock.jl") +include("euler_source_terms_nonperiodic.jl") +include("euler_source_terms.jl") 
+include("euler_vortex_mortar.jl") +include("eulermulti_ec.jl") +include("eulermulti_es.jl") +include("hypdiff_nonperiodic.jl") +include("mhd_alfven_wave_mortar.jl") +include("mhd_alfven_wave.jl") +include("mhd_shock.jl") +include("shallowwater_ec.jl") +include("shallowwater_source_terms.jl") +include("shawllowwater_source_terms_nonperiodic.jl") diff --git a/test/tree_dgsem_3d/advection_basic.jl b/test/tree_dgsem_3d/advection_basic.jl new file mode 100644 index 0000000..be95e0f --- /dev/null +++ b/test/tree_dgsem_3d/advection_basic.jl @@ -0,0 +1,159 @@ +module TestAdvectionBasic3D + +include("../test_macros.jl") + +@testset "Advection Basic 3D" begin + advection_velocity = (0.2, -0.7, 0.5) + equations = LinearScalarAdvectionEquation3D(advection_velocity) + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (-1.0, -1.0, -1.0) + coordinates_max = (1.0, 1.0, 1.0) + + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 30_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition_convergence_test, + solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization 
Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, 
solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal 
(u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/advection_mortar.jl b/test/tree_dgsem_3d/advection_mortar.jl new file mode 100644 index 0000000..ae69c3b --- /dev/null +++ b/test/tree_dgsem_3d/advection_mortar.jl @@ -0,0 +1,162 @@ +module TestAdvectionMortar3D + +include("../test_macros.jl") + +@testset "Advection Mortar 3D" begin + advection_velocity = (0.2, -0.7, 0.5) + equations = LinearScalarAdvectionEquation3D(advection_velocity) + + initial_condition = initial_condition_convergence_test + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (-1.0, -1.0, -1.0) + coordinates_max = (1.0, 1.0, 1.0) + refinement_patches = ((type = "box", coordinates_min = (0.0, -1.0, -1.0), + coordinates_max = (1.0, 1.0, 1.0)), + (type = "box", coordinates_min = (0.0, -0.5, -0.5), + coordinates_max = (0.5, 0.5, 0.5))) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + refinement_patches = refinement_patches, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 5.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = 
semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, 
solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, 
cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/euler_convergence.jl b/test/tree_dgsem_3d/euler_convergence.jl new file mode 100644 index 0000000..fa22e6a --- /dev/null +++ b/test/tree_dgsem_3d/euler_convergence.jl @@ -0,0 +1,160 @@ +module TestEulerConvergence3D + +include("../test_macros.jl") + +@testset "Euler Convergence 3D" begin + equations = CompressibleEulerEquations3D(2.0) + + initial_condition = initial_condition_eoc_test_coupled_euler_gravity + + solver = DGSEM(polydeg = 3, surface_flux = flux_hll, + volume_integral = VolumeIntegralWeakForm()) + + coordinates_min = (0.0, 0.0, 0.0) + coordinates_max = (2.0, 2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_eoc_test_euler) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_eoc_test_euler) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) 
+ du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + 
Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + 
Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/euler_ec.jl b/test/tree_dgsem_3d/euler_ec.jl new file mode 100644 index 0000000..bf327d6 --- /dev/null +++ b/test/tree_dgsem_3d/euler_ec.jl @@ -0,0 +1,159 @@ +module TestEulerEC3D + +include("../test_macros.jl") + +@testset "Euler EC 3D" begin + equations = CompressibleEulerEquations3D(1.4) + + initial_condition = initial_condition_weak_blast_wave + + volume_flux = flux_ranocha + solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-2.0, -2.0, -2.0) + coordinates_max = (2.0, 2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 100_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to 
GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx 
(cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + 
@testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/euler_mortar.jl b/test/tree_dgsem_3d/euler_mortar.jl new file mode 100644 index 0000000..7a46f66 --- /dev/null +++ b/test/tree_dgsem_3d/euler_mortar.jl @@ -0,0 +1,161 @@ +module TestEulerMortar3D + +include("../test_macros.jl") + +@testset "Euler Mortar 3D" begin + equations = CompressibleEulerEquations3D(1.4) + + initial_condition = initial_condition_convergence_test + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + + coordinates_min = (0.0, 0.0, 0.0) + coordinates_max = (2.0, 2.0, 2.0) + refinement_patches = ((type = "box", coordinates_min = (0.5, 0.5, 0.5), + coordinates_max = (1.5, 1.5, 1.5)),) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + refinement_patches = refinement_patches, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset 
"Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + 
solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx 
(du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/euler_shock.jl b/test/tree_dgsem_3d/euler_shock.jl new file mode 100644 index 0000000..96d2bcd --- /dev/null +++ b/test/tree_dgsem_3d/euler_shock.jl @@ -0,0 +1,170 @@ +module TestEulerShock3D + +include("../test_macros.jl") + +@testset "Euler Shock 3D" begin + equations = CompressibleEulerEquations3D(1.4) + + initial_condition = initial_condition_weak_blast_wave + + surface_flux = flux_ranocha + volume_flux = flux_ranocha + + polydeg = 3 + basis = LobattoLegendreBasis(polydeg) + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + solver = DGSEM(basis, surface_flux, volume_integral) + + coordinates_min = (-2.0, -2.0, -2.0) + coordinates_max = (2.0, 2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 100_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = 
semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, 
mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset 
"Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/euler_source_terms.jl b/test/tree_dgsem_3d/euler_source_terms.jl new file mode 100644 index 0000000..5e707cb --- /dev/null +++ b/test/tree_dgsem_3d/euler_source_terms.jl @@ -0,0 +1,160 @@ +module TestEulerSourceTerms3D + +include("../test_macros.jl") + +@testset "Euler Source Terms 3D" begin + equations = CompressibleEulerEquations3D(1.4) + + initial_condition = initial_condition_convergence_test + + solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + + coordinates_min = (0.0, 0.0, 0.0) + coordinates_max = (2.0, 2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + + tspan = (0.0, 5.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + 
boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + 
Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + 
TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/hypdiff_nonperiodic.jl b/test/tree_dgsem_3d/hypdiff_nonperiodic.jl new file mode 100644 index 0000000..70533d5 --- /dev/null +++ b/test/tree_dgsem_3d/hypdiff_nonperiodic.jl @@ -0,0 +1,168 @@ +module TestHypdiffNonperiodic3D + +include("../test_macros.jl") + +@testset "Hypdiff Nonperiodic 3D" begin + equations = HyperbolicDiffusionEquations3D() + + initial_condition = initial_condition_poisson_nonperiodic + boundary_conditions = (x_neg = boundary_condition_poisson_nonperiodic, + x_pos = boundary_condition_poisson_nonperiodic, + y_neg = boundary_condition_periodic, + y_pos = boundary_condition_periodic, + z_neg = boundary_condition_periodic, + z_pos = boundary_condition_periodic) + + solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) + + coordinates_min = (0.0, 0.0, 0.0) + coordinates_max = (1.0, 1.0, 1.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + n_cells_max = 30_000, + periodicity = (false, true, true)) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_poisson_nonperiodic, + boundary_conditions = boundary_conditions) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver, + source_terms = source_terms_poisson_nonperiodic, + boundary_conditions = boundary_conditions) + + tspan = (0.0, 5.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) 
= semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, 
boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, 
cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/mhd_alfven_wave.jl b/test/tree_dgsem_3d/mhd_alfven_wave.jl new file mode 100644 index 0000000..7c92ba5 --- /dev/null +++ b/test/tree_dgsem_3d/mhd_alfven_wave.jl @@ -0,0 +1,160 @@ +module TestMHDAlfvenWave3D + +include("../test_macros.jl") + +@testset "MHD Alfven Wave 3D" begin + equations = IdealGlmMhdEquations3D(5 / 3) + + initial_condition = initial_condition_convergence_test + + volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) + solver = DGSEM(polydeg = 3, + surface_flux = (flux_lax_friedrichs, flux_nonconservative_powell), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-1.0, -1.0, -1.0) + coordinates_max = (1.0, 1.0, 1.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = 
Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, 
boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, 
cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/mhd_alfven_wave_mortar.jl b/test/tree_dgsem_3d/mhd_alfven_wave_mortar.jl new file mode 100644 index 0000000..ccf0cbd --- /dev/null +++ b/test/tree_dgsem_3d/mhd_alfven_wave_mortar.jl @@ -0,0 +1,164 @@ +module TestMHDAlfvenWaveMortar3D + +include("../test_macros.jl") + +@testset "MHD Alfven Wave Mortar 3D" begin + equations = IdealGlmMhdEquations3D(5 / 3) + + initial_condition = initial_condition_convergence_test + + volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) + solver = DGSEM(polydeg = 3, + surface_flux = (flux_hlle, + flux_nonconservative_powell), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-1.0, -1.0, -1.0) + coordinates_max = (1.0, 1.0, 1.0) + refinement_patches = ((type = "box", coordinates_min = (-0.5, -0.5, -0.5), + coordinates_max = (0.5, 0.5, 0.5)),) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + refinement_patches = refinement_patches, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = 
copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + 
cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + 
TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/mhd_ec.jl b/test/tree_dgsem_3d/mhd_ec.jl new file mode 100644 index 0000000..e92a79f --- /dev/null +++ b/test/tree_dgsem_3d/mhd_ec.jl @@ -0,0 +1,160 @@ +module TestMHDEC3D + +include("../test_macros.jl") + +@testset "MHD EC 3D" begin + equations = IdealGlmMhdEquations3D(1.4) + + initial_condition = initial_condition_weak_blast_wave + + volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) + solver = DGSEM(polydeg = 3, + surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + + coordinates_min = (-2.0, -2.0, -2.0) + coordinates_max = (2.0, 2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + n_cells_max = 10_000) + + semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 0.4) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data 
+ t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + 
end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, 
cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/mhd_shock.jl b/test/tree_dgsem_3d/mhd_shock.jl new file mode 100644 index 0000000..a3f0a8c --- /dev/null +++ b/test/tree_dgsem_3d/mhd_shock.jl @@ -0,0 +1,171 @@ +module TestMHDShock3D + +include("../test_macros.jl") + +@testset "MHD Shock 3D" begin + equations = IdealGlmMhdEquations3D(1.4) + + initial_condition = initial_condition_weak_blast_wave + + surface_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) + volume_flux = (flux_hindenlang_gassner, flux_nonconservative_powell) + polydeg = 4 + basis = LobattoLegendreBasis(polydeg) + indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) + volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + + solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + + coordinates_min = (-2.0, -2.0, -2.0) + coordinates_max = (2.0, 2.0, 2.0) + mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 3, + n_cells_max = 10_000) + + semi = 
SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + semi_gpu = SemidiscretizationHyperbolicGPU(mesh, equations, initial_condition, solver) + + tspan = (0.0, 1.0) + + ode = semidiscretize(semi, tspan) + u_ode = copy(ode.u0) + du_ode = similar(u_ode) + + # Get CPU data + t = 0.0 + (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi + u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache) + du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache) + + # Get GPU data + t_gpu = 0.0 + equations_gpu = semi_gpu.equations + mesh_gpu, solver_gpu, cache_gpu = semi_gpu.mesh, semi_gpu.solver, semi_gpu.cache + initial_condition_gpu = semi_gpu.initial_condition + boundary_conditions_gpu = semi_gpu.boundary_conditions + source_terms_gpu = semi_gpu.source_terms + u_gpu = CuArray(u) + du_gpu = CuArray(du) + + # Begin tests + @testset "Semidiscretization Process" begin + @testset "Copy to GPU" begin + du_gpu, u_gpu = TrixiCUDA.copy_to_gpu!(du, u) + Trixi.reset_du!(du, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Volume Integral" begin + TrixiCUDA.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu.volume_integral, solver_gpu, + cache_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), + equations, solver.volume_integral, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Prolong Interfaces" begin + TrixiCUDA.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.interfaces.u, cache.interfaces.u) + @test_equal (u_gpu, u) + end + + @testset "Interface Flux" begin + TrixiCUDA.cuda_interface_flux!(mesh_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) 
+ Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong Boundaries" begin + TrixiCUDA.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + equations_gpu, cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test_approx (cache_gpu.boundaries.u, cache.boundaries.u) + @test_equal (u_gpu, u) + end + + @testset "Boundary Flux" begin + TrixiCUDA.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Prolong mortars" begin + TrixiCUDA.cuda_prolong2mortars!(u_gpu, mesh_gpu, + TrixiCUDA.check_cache_mortars(cache_gpu), + solver_gpu, cache_gpu) + Trixi.prolong2mortars!(cache, u, mesh, equations, + solver.mortar, solver.surface_integral, solver) + @test_approx (cache_gpu.mortars.u_upper_left, cache.mortars.u_upper_left) + @test_approx (cache_gpu.mortars.u_upper_right, cache.mortars.u_upper_right) + @test_approx (cache_gpu.mortars.u_lower_left, cache.mortars.u_lower_left) + @test_approx (cache_gpu.mortars.u_lower_right, cache.mortars.u_lower_right) + @test_equal (u_gpu, u) + end + + @testset "Mortar Flux" begin + TrixiCUDA.cuda_mortar_flux!(mesh_gpu, TrixiCUDA.check_cache_mortars(cache_gpu), + Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_mortar_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + 
solver.mortar, solver.surface_integral, solver, cache) + @test_approx (cache_gpu.elements.surface_flux_values, + cache.elements.surface_flux_values) + @test_equal (u_gpu, u) + end + + @testset "Surface Integral" begin + TrixiCUDA.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, + solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Jacobian" begin + TrixiCUDA.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Apply Sources" begin + TrixiCUDA.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, + equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test_approx (du_gpu, du) + @test_equal (u_gpu, u) + end + + @testset "Copy to CPU" begin + du_cpu, u_cpu = TrixiCUDA.copy_to_cpu!(du_gpu, u_gpu) + @test_approx (du_cpu, du) + @test_equal (u_cpu, u) + end + end +end + +end # module diff --git a/test/tree_dgsem_3d/tree_dgsem_3d.jl b/test/tree_dgsem_3d/tree_dgsem_3d.jl new file mode 100644 index 0000000..3b11459 --- /dev/null +++ b/test/tree_dgsem_3d/tree_dgsem_3d.jl @@ -0,0 +1,12 @@ +include("advection_basic.jl") +include("advection_mortar.jl") +include("euler_convergence.jl") +include("euler_ec.jl") +include("euler_mortar.jl") +include("euler_shock.jl") +include("euler_source_terms.jl") +include("hypdiff_nonperiodic.jl") +include("mhd_alfven_wave_mortar.jl") +include("mhd_alfven_wave.jl") +include("mhd_ec.jl") +include("mhd_shock.jl")