diff --git a/examples/hypdiff_nonperiodic_1d.jl b/examples/hypdiff_nonperiodic_1d.jl
new file mode 100644
index 00000000..6488efda
--- /dev/null
+++ b/examples/hypdiff_nonperiodic_1d.jl
@@ -0,0 +1,63 @@
+using Trixi, TrixiGPU
+using OrdinaryDiffEq
+
+# This example is taken from Trixi.jl
+
+###############################################################################
+# semidiscretization of the hyperbolic diffusion equations
+
+equations = HyperbolicDiffusionEquations1D()
+
+initial_condition = initial_condition_poisson_nonperiodic
+
+boundary_conditions = boundary_condition_poisson_nonperiodic
+
+solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs)
+
+coordinates_min = 0.0
+coordinates_max = 1.0
+mesh = TreeMesh(coordinates_min, coordinates_max,
+                initial_refinement_level = 3,
+                n_cells_max = 30_000,
+                periodicity = false)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                    boundary_conditions = boundary_conditions,
+                                    source_terms = source_terms_poisson_nonperiodic)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 5.0)
+ode = semidiscretize_gpu(semi, tspan) # from TrixiGPU.jl
+
+summary_callback = SummaryCallback()
+
+resid_tol = 5.0e-12
+steady_state_callback = SteadyStateCallback(abstol = resid_tol, reltol = 0.0)
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval,
+                                     extra_analysis_integrals = (entropy, energy_total))
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 100,
+                                     save_initial_solution = true,
+                                     save_final_solution = true,
+                                     solution_variables = cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl = 1.0)
+
+callbacks = CallbackSet(summary_callback, steady_state_callback,
+                        analysis_callback, alive_callback,
+                        save_solution,
+                        stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+            dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+            save_everystep = false, callback = callbacks)
+summary_callback() # print the timer summary
diff --git a/examples/hypdiff_nonperiodic_2d.jl b/examples/hypdiff_nonperiodic_2d.jl
new file mode 100644
index 00000000..8b76594f
--- /dev/null
+++ b/examples/hypdiff_nonperiodic_2d.jl
@@ -0,0 +1,65 @@
+using Trixi, TrixiGPU
+using OrdinaryDiffEq
+
+# This example is taken from Trixi.jl
+
+###############################################################################
+# semidiscretization of the hyperbolic diffusion equations
+
+equations = HyperbolicDiffusionEquations2D()
+
+initial_condition = initial_condition_poisson_nonperiodic
+
+boundary_conditions = (x_neg = boundary_condition_poisson_nonperiodic,
+                       x_pos = boundary_condition_poisson_nonperiodic,
+                       y_neg = boundary_condition_periodic,
+                       y_pos = boundary_condition_periodic)
+
+solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs)
+
+coordinates_min = (0.0, 0.0)
+coordinates_max = (1.0, 1.0)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+                initial_refinement_level = 3,
+                n_cells_max = 30_000,
+                periodicity = (false, true))
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                    boundary_conditions = boundary_conditions,
+                                    source_terms = source_terms_poisson_nonperiodic)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+
+tspan = (0.0, 5.0)
+ode = semidiscretize_gpu(semi, tspan) # from TrixiGPU.jl
+
+summary_callback = SummaryCallback()
+
+resid_tol = 5.0e-12
+steady_state_callback = SteadyStateCallback(abstol = resid_tol, reltol = 0.0)
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+save_solution = SaveSolutionCallback(interval = 100,
+                                     save_initial_solution = true,
+                                     save_final_solution = true,
+                                     solution_variables = cons2prim)
+
+stepsize_callback = StepsizeCallback(cfl = 1.0)
+
+callbacks = CallbackSet(summary_callback, steady_state_callback,
+                        analysis_callback, alive_callback,
+                        save_solution,
+                        stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+            dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback
+            save_everystep = false, callback = callbacks)
+summary_callback() # print the timer summary
diff --git a/examples/hypdiff_nonperiodic_3d.jl b/examples/hypdiff_nonperiodic_3d.jl
new file mode 100644
index 00000000..99cd03c7
--- /dev/null
+++ b/examples/hypdiff_nonperiodic_3d.jl
@@ -0,0 +1,67 @@
+using Trixi, TrixiGPU
+using OrdinaryDiffEq
+
+# This example is taken from Trixi.jl
+
+###############################################################################
+# semidiscretization of the hyperbolic diffusion equations
+
+equations = HyperbolicDiffusionEquations3D()
+
+initial_condition = initial_condition_poisson_nonperiodic
+boundary_conditions = (x_neg = boundary_condition_poisson_nonperiodic,
+                       x_pos = boundary_condition_poisson_nonperiodic,
+                       y_neg = boundary_condition_periodic,
+                       y_pos = boundary_condition_periodic,
+                       z_neg = boundary_condition_periodic,
+                       z_pos = boundary_condition_periodic)
+
+solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs)
+
+coordinates_min = (0.0, 0.0, 0.0)
+coordinates_max = (1.0, 1.0, 1.0)
+mesh = TreeMesh(coordinates_min, coordinates_max,
+                initial_refinement_level = 2,
+                n_cells_max = 30_000,
+                periodicity = (false, true, true))
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                    source_terms = source_terms_poisson_nonperiodic,
+                                    boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
+ +tspan = (0.0, 5.0) +ode = semidiscretize_gpu(semi, tspan) # from TrixiGPU.jl + +summary_callback = SummaryCallback() + +resid_tol = 1.0e-5 +steady_state_callback = SteadyStateCallback(abstol = resid_tol, reltol = 0.0) + +analysis_interval = 200 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_integrals = (entropy, energy_total)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 1.8) + +callbacks = CallbackSet(summary_callback, steady_state_callback, + analysis_callback, alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = Trixi.solve(ode, Trixi.HypDiffN3Erk3Sstar52(), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks) +summary_callback() # print the timer summary diff --git a/test/test_advection_basic.jl b/test/test_advection_basic.jl index 083587d7..d564903d 100644 --- a/test/test_advection_basic.jl +++ b/test/test_advection_basic.jl @@ -19,7 +19,7 @@ isdir(outdir) && rm(outdir, recursive = true) # should at least satisfy this error bound. # Test precision of the semidiscretization process -@testset "Test Linear Advection Equation" begin +@testset "Test Linear Advection Basic" begin @testset "Linear Advection 1D" begin advection_velocity = 1.0 equations = LinearScalarAdvectionEquation1D(advection_velocity) @@ -42,7 +42,7 @@ isdir(outdir) && rm(outdir, recursive = true) boundary_conditions source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 1.0) ode = semidiscretize(semi, tspan) @@ -59,24 +59,20 @@ isdir(outdir) && rm(outdir, recursive = true) # Test `cuda_volume_integral!` TrixiGPU.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, - solver_gpu) - Trixi.calc_volume_integral!(du, u, mesh, - Trixi.have_nonconservative_terms(equations), + equations_gpu, solver_gpu.volume_integral, solver_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), equations, solver.volume_integral, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2interfaces!` TrixiGPU.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, - solver) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_interface_flux!` - TrixiGPU.cuda_interface_flux!(mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), + TrixiGPU.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, solver_gpu, cache_gpu) Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, Trixi.have_nonconservative_terms(equations), equations, @@ -85,15 +81,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, 
boundary_conditions_gpu, equations_gpu, cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, - solver) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -101,10 +96,8 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_surface_integral!` - TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, - cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, - solver, cache) + TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu @@ -115,7 +108,7 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu @@ -146,7 +139,7 @@ isdir(outdir) && rm(outdir, recursive = true) boundary_conditions source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 1.0) ode = semidiscretize(semi, tspan) @@ -163,24 +156,20 @@ isdir(outdir) && rm(outdir, recursive = true) # Test `cuda_volume_integral!` TrixiGPU.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, - solver_gpu) - Trixi.calc_volume_integral!(du, u, mesh, - Trixi.have_nonconservative_terms(equations), + equations_gpu, solver_gpu.volume_integral, solver_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), equations, solver.volume_integral, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2interfaces!` TrixiGPU.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, - solver) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_interface_flux!` - TrixiGPU.cuda_interface_flux!(mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), + TrixiGPU.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, solver_gpu, cache_gpu) Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, Trixi.have_nonconservative_terms(equations), equations, @@ -189,15 +178,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, 
cache_gpu) - Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, - solver) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -205,10 +193,8 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_surface_integral!` - TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, - cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, - solver, cache) + TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu @@ -219,7 +205,7 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu @@ -250,7 +236,7 @@ isdir(outdir) && rm(outdir, recursive = true) boundary_conditions source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 1.0) ode = semidiscretize(semi, tspan) @@ -267,24 +253,20 @@ isdir(outdir) && rm(outdir, recursive = true) # Test `cuda_volume_integral!` TrixiGPU.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), - equations_gpu, solver_gpu.volume_integral, - solver_gpu) - Trixi.calc_volume_integral!(du, u, mesh, - Trixi.have_nonconservative_terms(equations), + equations_gpu, solver_gpu.volume_integral, solver_gpu) + Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations), equations, solver.volume_integral, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2interfaces!` TrixiGPU.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu) - Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, - solver) + Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_interface_flux!` - TrixiGPU.cuda_interface_flux!(mesh_gpu, - Trixi.have_nonconservative_terms(equations_gpu), + TrixiGPU.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), equations_gpu, solver_gpu, cache_gpu) Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, Trixi.have_nonconservative_terms(equations), equations, @@ -293,15 +275,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, cache_gpu) - Trixi.prolong2boundaries!(cache, u, 
mesh, equations, solver.surface_integral, - solver) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -309,10 +290,8 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_surface_integral!` - TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, - cache_gpu) - Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, - solver, cache) + TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu @@ -323,7 +302,7 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu diff --git a/test/test_euler_ec.jl b/test/test_euler_ec.jl index 884e1cfc..aed1966c 100644 --- a/test/test_euler_ec.jl +++ b/test/test_euler_ec.jl @@ -1,4 +1,4 @@ -module TestCompressibleEuler +module TestCompressibleEulerFluxDifferencing using Trixi, TrixiGPU using OrdinaryDiffEq @@ -19,7 +19,7 @@ isdir(outdir) && rm(outdir, recursive = true) # should at least satisfy this error bound. 
# Test precision of the semidiscretization process -@testset "Test Compressible Euler Equation" begin +@testset "Test Compressible Euler Flux Differencing" begin @testset "Compressible Euler 1D" begin equations = CompressibleEulerEquations1D(1.4) @@ -42,7 +42,7 @@ isdir(outdir) && rm(outdir, recursive = true) initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 0.4) ode = semidiscretize(semi, tspan) @@ -81,13 +81,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, cache_gpu) + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, + cache_gpu) Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -107,7 +108,7 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu @@ -139,7 +140,7 @@ isdir(outdir) && rm(outdir, recursive = true) initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 0.4) ode = semidiscretize(semi, tspan) @@ -178,13 +179,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, cache_gpu) + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, + cache_gpu) Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -204,7 +206,7 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu @@ -235,7 +237,7 @@ isdir(outdir) && rm(outdir, recursive = true) initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions source_terms_gpu, solver_gpu, 
cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 0.4) ode = semidiscretize(semi, tspan) @@ -274,13 +276,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, cache_gpu) + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, + cache_gpu) Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -290,22 +293,19 @@ isdir(outdir) && rm(outdir, recursive = true) # Test `cuda_surface_integral!` TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) - # @test_broken CUDA.@allowscalar du ≈ du_gpu - @test CUDA.@allowscalar isapprox(du, du_gpu, rtol = eps(Float64)^(1 / 3)) + @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_jacobian!` TrixiGPU.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) Trixi.apply_jacobian!(du, mesh, equations, solver, cache) - # @test_broken CUDA.@allowscalar du ≈ du_gpu - @test CUDA.@allowscalar isapprox(du, du_gpu, rtol = eps(Float64)^(1 / 3)) + @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - # @test_broken CUDA.@allowscalar du ≈ du_gpu - @test CUDA.@allowscalar isapprox(du, du_gpu, rtol = eps(Float64)^(1 / 3)) + @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Copy data back to host diff --git a/test/test_euler_source_terms.jl b/test/test_euler_source_terms.jl index 45c603df..e709b657 100644 --- a/test/test_euler_source_terms.jl +++ b/test/test_euler_source_terms.jl @@ -19,7 +19,7 @@ isdir(outdir) && rm(outdir, recursive = true) # should at least satisfy this error bound. 
# Test precision of the semidiscretization process -@testset "Test Compressible Euler Equation" begin +@testset "Test Compressible Euler Source Terms" begin @testset "Compressible Euler 1D" begin equations = CompressibleEulerEquations1D(1.4) @@ -42,7 +42,7 @@ isdir(outdir) && rm(outdir, recursive = true) initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 2.0) ode = semidiscretize(semi, tspan) @@ -81,13 +81,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, cache_gpu) + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, + cache_gpu) Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -107,7 +108,7 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu @@ -137,7 +138,7 @@ isdir(outdir) && rm(outdir, recursive = true) initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 2.0) ode = semidiscretize(semi, tspan) @@ -176,13 +177,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, cache_gpu) + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, + cache_gpu) Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -202,10 +204,9 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - # @test_broken CUDA.@allowscalar du ≈ du_gpu - @test CUDA.@allowscalar isapprox(du, du_gpu, rtol = eps(Float64)^(1 / 3)) + @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Copy data back to host @@ -235,7 +236,7 @@ isdir(outdir) && 
rm(outdir, recursive = true) initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache - t = 0.0 + t = t_gpu = 0.0 tspan = (0.0, 5.0) ode = semidiscretize(semi, tspan) @@ -274,13 +275,14 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_prolong2boundaries!` - TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, cache_gpu) + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, + cache_gpu) Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_boundary_flux!` - TrixiGPU.cuda_boundary_flux!(t, mesh_gpu, boundary_conditions_gpu, equations_gpu, + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, solver_gpu, cache_gpu) Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, solver.surface_integral, solver) @@ -300,10 +302,9 @@ isdir(outdir) && rm(outdir, recursive = true) @test CUDA.@allowscalar u ≈ u_gpu # Test `cuda_sources!` - TrixiGPU.cuda_sources!(du_gpu, u_gpu, t, source_terms_gpu, equations_gpu, cache_gpu) + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) - # @test_broken CUDA.@allowscalar du ≈ du_gpu - @test CUDA.@allowscalar isapprox(du, du_gpu, rtol = eps(Float64)^(1 / 3)) + @test CUDA.@allowscalar du ≈ du_gpu @test CUDA.@allowscalar u ≈ u_gpu # Copy data back to host diff --git a/test/test_hypdiff_nonperiodic.jl b/test/test_hypdiff_nonperiodic.jl new file mode 100644 index 00000000..f996d645 --- /dev/null +++ b/test/test_hypdiff_nonperiodic.jl @@ -0,0 +1,334 @@ +module TestHyperbolicDiffusionBoundaryConditions + +using Trixi, TrixiGPU +using OrdinaryDiffEq +using Test, CUDA + +# Start testing with a clean environment +outdir = "out" +isdir(outdir) && rm(outdir, recursive = true) + +# Note that it is complicated to get tight error bounds for GPU kernels, so here we adopt +# a relaxed error bound for the tests. Specifically, we use `isapprox` with the default mode, +# i.e., `rtol = eps(Float64)^(1/2)`, to validate the precision by comparing the `Float64` +# results from GPU kernels and CPU kernels, which corresponds to requiring equality of about +# half of the significant digits (see https://docs.julialang.org/en/v1/base/math/#Base.isapprox). + +# Basically, this heuristic method first checks whether the relaxed error bound (sometimes +# it is further relaxed) is satisfied. Any new methods and optimizations introduced later +# should at least satisfy this error bound. 
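+# As a concrete illustration (assuming `du` and `du_gpu` denote the CPU and GPU
+# right-hand-side arrays compared below), the `≈` checks in these tests are shorthand for
+#     @test CUDA.@allowscalar isapprox(du, du_gpu, rtol = eps(Float64)^(1 / 2))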
+
+# Test precision of the semidiscretization process
+@testset "Test Hyperbolic Diffusion Boundary Conditions" begin
+    @testset "Hyperbolic Diffusion 1D" begin
+        equations = HyperbolicDiffusionEquations1D()
+
+        initial_condition = initial_condition_poisson_nonperiodic
+
+        boundary_conditions = boundary_condition_poisson_nonperiodic
+
+        solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs)
+
+        coordinates_min = 0.0
+        coordinates_max = 1.0
+        mesh = TreeMesh(coordinates_min, coordinates_max,
+                        initial_refinement_level = 3,
+                        n_cells_max = 30_000,
+                        periodicity = false)
+
+        semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                            boundary_conditions = boundary_conditions,
+                                            source_terms = source_terms_poisson_nonperiodic)
+        (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi
+
+        # Get copy for GPU to avoid overwriting during tests
+        mesh_gpu, equations_gpu = mesh, equations
+        initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions
+        source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache
+
+        t = t_gpu = 0.0
+        tspan = (0.0, 5.0)
+
+        ode = semidiscretize(semi, tspan)
+        u_ode = copy(ode.u0)
+        du_ode = similar(u_ode)
+        u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache)
+        du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache)
+
+        # Copy data to device
+        du_gpu, u_gpu = TrixiGPU.copy_to_device!(du, u)
+        # Reset data on host
+        Trixi.reset_du!(du, solver, cache)
+
+        # Test `cuda_volume_integral!`
+        TrixiGPU.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu,
+                                       Trixi.have_nonconservative_terms(equations_gpu),
+                                       equations_gpu, solver_gpu.volume_integral, solver_gpu)
+        Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations),
+                                    equations, solver.volume_integral, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_prolong2interfaces!`
+        TrixiGPU.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu)
+        Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_interface_flux!`
+        TrixiGPU.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu),
+                                      equations_gpu, solver_gpu, cache_gpu)
+        Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh,
+                                   Trixi.have_nonconservative_terms(equations), equations,
+                                   solver.surface_integral, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_prolong2boundaries!`
+        TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu,
+                                          cache_gpu)
+        Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_boundary_flux!`
+        TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu,
+                                     solver_gpu, cache_gpu)
+        Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations,
+                                  solver.surface_integral, solver)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_surface_integral!`
+        TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu)
+        Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_jacobian!`
+        TrixiGPU.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu)
+        Trixi.apply_jacobian!(du, mesh, equations, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_sources!`
+        TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu)
+        Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Copy data back to host
+        du_cpu, u_cpu = TrixiGPU.copy_to_host!(du_gpu, u_gpu)
+    end
+
+    @testset "Hyperbolic Diffusion 2D" begin
+        equations = HyperbolicDiffusionEquations2D()
+
+        initial_condition = initial_condition_poisson_nonperiodic
+
+        boundary_conditions = (x_neg = boundary_condition_poisson_nonperiodic,
+                               x_pos = boundary_condition_poisson_nonperiodic,
+                               y_neg = boundary_condition_periodic,
+                               y_pos = boundary_condition_periodic)
+
+        solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs)
+
+        coordinates_min = (0.0, 0.0)
+        coordinates_max = (1.0, 1.0)
+        mesh = TreeMesh(coordinates_min, coordinates_max,
+                        initial_refinement_level = 3,
+                        n_cells_max = 30_000,
+                        periodicity = (false, true))
+
+        semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                            boundary_conditions = boundary_conditions,
+                                            source_terms = source_terms_poisson_nonperiodic)
+        (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi
+
+        # Get copy for GPU to avoid overwriting during tests
+        mesh_gpu, equations_gpu = mesh, equations
+        initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions
+        source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache
+
+        t = t_gpu = 0.0
+        tspan = (0.0, 5.0)
+
+        ode = semidiscretize(semi, tspan)
+        u_ode = copy(ode.u0)
+        du_ode = similar(u_ode)
+        u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache)
+        du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache)
+
+        # Copy data to device
+        du_gpu, u_gpu = TrixiGPU.copy_to_device!(du, u)
+        # Reset data on host
+        Trixi.reset_du!(du, solver, cache)
+
+        # Test `cuda_volume_integral!`
+        TrixiGPU.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu,
+                                       Trixi.have_nonconservative_terms(equations_gpu),
+                                       equations_gpu, solver_gpu.volume_integral, solver_gpu)
+        Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations),
+                                    equations, solver.volume_integral, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_prolong2interfaces!`
+        TrixiGPU.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu)
+        Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_interface_flux!`
+        TrixiGPU.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu),
+                                      equations_gpu, solver_gpu, cache_gpu)
+        Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh,
+                                   Trixi.have_nonconservative_terms(equations), equations,
+                                   solver.surface_integral, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_prolong2boundaries!`
+        TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu,
+                                          cache_gpu)
+        Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_boundary_flux!`
+        TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu,
+                                     solver_gpu, cache_gpu)
+        Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations,
+                                  solver.surface_integral, solver)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_surface_integral!`
+        TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu)
+        Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_jacobian!`
+        TrixiGPU.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu)
+        Trixi.apply_jacobian!(du, mesh, equations, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_sources!`
+        TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu)
+        Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Copy data back to host
+        du_cpu, u_cpu = TrixiGPU.copy_to_host!(du_gpu, u_gpu)
+    end
+
+    @testset "Hyperbolic Diffusion 3D" begin
+        equations = HyperbolicDiffusionEquations3D()
+
+        initial_condition = initial_condition_poisson_nonperiodic
+        boundary_conditions = (x_neg = boundary_condition_poisson_nonperiodic,
+                               x_pos = boundary_condition_poisson_nonperiodic,
+                               y_neg = boundary_condition_periodic,
+                               y_pos = boundary_condition_periodic,
+                               z_neg = boundary_condition_periodic,
+                               z_pos = boundary_condition_periodic)
+
+        solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs)
+
+        coordinates_min = (0.0, 0.0, 0.0)
+        coordinates_max = (1.0, 1.0, 1.0)
+        mesh = TreeMesh(coordinates_min, coordinates_max,
+                        initial_refinement_level = 2,
+                        n_cells_max = 30_000,
+                        periodicity = (false, true, true))
+
+        semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                            source_terms = source_terms_poisson_nonperiodic,
+                                            boundary_conditions = boundary_conditions)
+        (; mesh, equations, initial_condition, boundary_conditions, source_terms, solver, cache) = semi
+
+        # Get copy for GPU to avoid overwriting during tests
+        mesh_gpu, equations_gpu = mesh, equations
+        initial_condition_gpu, boundary_conditions_gpu = initial_condition, boundary_conditions
+        source_terms_gpu, solver_gpu, cache_gpu = source_terms, solver, cache
+
+        t = t_gpu = 0.0
+        tspan = (0.0, 5.0)
+
+        ode = semidiscretize(semi, tspan)
+        u_ode = copy(ode.u0)
+        du_ode = similar(u_ode)
+        u = Trixi.wrap_array(u_ode, mesh, equations, solver, cache)
+        du = Trixi.wrap_array(du_ode, mesh, equations, solver, cache)
+
+        # Copy data to device
+        du_gpu, u_gpu = TrixiGPU.copy_to_device!(du, u)
+        # Reset data on host
+        Trixi.reset_du!(du, solver, cache)
+
+        # Test `cuda_volume_integral!`
+        TrixiGPU.cuda_volume_integral!(du_gpu, u_gpu, mesh_gpu,
+                                       Trixi.have_nonconservative_terms(equations_gpu),
+                                       equations_gpu, solver_gpu.volume_integral, solver_gpu)
+        Trixi.calc_volume_integral!(du, u, mesh, Trixi.have_nonconservative_terms(equations),
+                                    equations, solver.volume_integral, solver, cache)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+        # Test `cuda_prolong2interfaces!`
+        TrixiGPU.cuda_prolong2interfaces!(u_gpu, mesh_gpu, equations_gpu, cache_gpu)
+        Trixi.prolong2interfaces!(cache, u, mesh, equations, solver.surface_integral, solver)
+        @test CUDA.@allowscalar du ≈ du_gpu
+        @test CUDA.@allowscalar u ≈ u_gpu
+
+
# Test `cuda_interface_flux!` + TrixiGPU.cuda_interface_flux!(mesh_gpu, Trixi.have_nonconservative_terms(equations_gpu), + equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_interface_flux!(cache.elements.surface_flux_values, mesh, + Trixi.have_nonconservative_terms(equations), equations, + solver.surface_integral, solver, cache) + @test CUDA.@allowscalar du ≈ du_gpu + @test CUDA.@allowscalar u ≈ u_gpu + + # Test `cuda_prolong2boundaries!` + TrixiGPU.cuda_prolong2boundaries!(u_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, + cache_gpu) + Trixi.prolong2boundaries!(cache, u, mesh, equations, solver.surface_integral, solver) + @test CUDA.@allowscalar du ≈ du_gpu + @test CUDA.@allowscalar u ≈ u_gpu + + # Test `cuda_boundary_flux!` + TrixiGPU.cuda_boundary_flux!(t_gpu, mesh_gpu, boundary_conditions_gpu, equations_gpu, + solver_gpu, cache_gpu) + Trixi.calc_boundary_flux!(cache, t, boundary_conditions, mesh, equations, + solver.surface_integral, solver) + @test CUDA.@allowscalar du ≈ du_gpu + @test CUDA.@allowscalar u ≈ u_gpu + + # Test `cuda_surface_integral!` + TrixiGPU.cuda_surface_integral!(du_gpu, mesh_gpu, equations_gpu, solver_gpu, cache_gpu) + Trixi.calc_surface_integral!(du, u, mesh, equations, solver.surface_integral, solver, cache) + @test CUDA.@allowscalar du ≈ du_gpu + @test CUDA.@allowscalar u ≈ u_gpu + + # Test `cuda_jacobian!` + TrixiGPU.cuda_jacobian!(du_gpu, mesh_gpu, equations_gpu, cache_gpu) + Trixi.apply_jacobian!(du, mesh, equations, solver, cache) + @test CUDA.@allowscalar du ≈ du_gpu + @test CUDA.@allowscalar u ≈ u_gpu + + # Test `cuda_sources!` + TrixiGPU.cuda_sources!(du_gpu, u_gpu, t_gpu, source_terms_gpu, equations_gpu, cache_gpu) + Trixi.calc_sources!(du, u, t, source_terms, equations, solver, cache) + @test CUDA.@allowscalar du ≈ du_gpu + @test CUDA.@allowscalar u ≈ u_gpu + + # Copy data back to host + du_cpu, u_cpu = TrixiGPU.copy_to_host!(du_gpu, u_gpu) + end +end + +end # module
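
For reference, a minimal way to exercise one of the new elixirs end to end (a sketch assuming a
CUDA-capable device and that the dependencies listed above are installed) is to include it from
the repository root:

    # Runs the full GPU pipeline: setup, `semidiscretize_gpu`, and the time integration loop
    using Trixi, TrixiGPU, OrdinaryDiffEq
    include(joinpath("examples", "hypdiff_nonperiodic_1d.jl"))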