diff --git a/NEWS.md b/NEWS.md index bc213fcea55..f012de09786 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,6 +4,22 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. +## Changes when updating to v0.6 from v0.5.x + +#### Added + +#### Changed + +#### Deprecated + +#### Removed + +- The neural network-based shock indicators have been migrated to a new repository + [TrixiSmartShockFinder.jl](https://github.com/trixi-framework/TrixiSmartShockFinder.jl). + To continue using the indicators, you will need to use both Trixi.jl and + TrixiSmartShockFinder.jl, as explained in the latter package's `README.md`. + + ## Changes in the v0.5 lifecycle #### Added @@ -31,6 +47,9 @@ for human readability. #### Removed +- Migrated neural network-based shock indicators to a new repository + [TrixiSmartShockFinder.jl](https://github.com/trixi-framework/TrixiSmartShockFinder.jl). + ## Changes when updating to v0.5 from v0.4.x diff --git a/examples/tree_1d_dgsem/elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl b/examples/tree_1d_dgsem/elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl deleted file mode 100644 index 05f392624fd..00000000000 --- a/examples/tree_1d_dgsem/elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl +++ /dev/null @@ -1,109 +0,0 @@ -using Downloads: download -using Flux -using BSON: load -network = joinpath(@__DIR__, "modelnnpp-0.97-0.0001.bson") -download("https://github.com/trixi-framework/Trixi_IndicatorNeuralNetwork_networks/raw/main/networks/modelnnpp-0.97-0.0001.bson", - network) -model1d = load(network, @__MODULE__)[:model1d] - -using OrdinaryDiffEq -using Trixi - -# This elixir was one of the setups used in the following master thesis: -# - Julia Odenthal (2021) -# Shock capturing with artificial neural networks -# University of Cologne, advisors: Gregor Gassner, Michael Schlottke-Lakemper -# This motivates the particular choice of fluxes, mesh resolution etc. - -############################################################################### -# semidiscretization of the compressible Euler equations - -equations = CompressibleEulerEquations1D(1.4) - -""" - initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations1D) - -A medium blast wave taken from -- Sebastian Hennemann, Gregor J. Gassner (2020) - A provably entropy stable subcell shock capturing approach for high order split form DG - [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044) -""" -function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations1D) - # Modified From Hennemann & Gassner JCP paper 2020 (Sec. 6.3) -> "medium blast wave" - # Set up polar coordinates - inicenter = SVector(0.0) - x_norm = x[1] - inicenter[1] - r = abs(x_norm) - # The following code is equivalent to - # phi = atan(0.0, x_norm) - # cos_phi = cos(phi) - # in 1D but faster - cos_phi = x_norm > 0 ? one(x_norm) : -one(x_norm) - - # Calculate primitive variables - rho = r > 0.5 ? 1.0 : 1.1691 - v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi - p = r > 0.5 ? 
1.0E-3 : 1.245 - - return prim2cons(SVector(rho, v1, p), equations) -end -initial_condition = initial_condition_blast_wave - -surface_flux = flux_lax_friedrichs -volume_flux = flux_chandrashekar -basis = LobattoLegendreBasis(3) -indicator_sc = IndicatorNeuralNetwork(equations, basis, - indicator_type = NeuralNetworkPerssonPeraire(), - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = false, - alpha_amr = false, - variable = density_pressure, - network = model1d) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) -solver = DGSEM(basis, surface_flux, volume_integral) - -coordinates_min = (-2.0,) -coordinates_max = (2.0,) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 10_000) - -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 12.5) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 100 - -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true, - solution_variables = cons2prim) - -stepsize_callback = StepsizeCallback(cfl = 0.5) - -callbacks = CallbackSet(summary_callback, - analysis_callback, alive_callback, - save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl b/examples/tree_1d_dgsem/elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl deleted file mode 100644 index de2f5134a49..00000000000 --- a/examples/tree_1d_dgsem/elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl +++ /dev/null @@ -1,109 +0,0 @@ -using Downloads: download -using Flux -using BSON: load -network = joinpath(@__DIR__, "modelnnrh-0.95-0.009.bson") -download("https://github.com/trixi-framework/Trixi_IndicatorNeuralNetwork_networks/raw/main/networks/modelnnrh-0.95-0.009.bson", - network) -model1d = load(network, @__MODULE__)[:model1d] - -using OrdinaryDiffEq -using Trixi - -# This elixir was one of the setups used in the following master thesis: -# - Julia Odenthal (2021) -# Shock capturing with artificial neural networks -# University of Cologne, advisors: Gregor Gassner, Michael Schlottke-Lakemper -# This motivates the particular choice of fluxes, mesh resolution etc. - -############################################################################### -# semidiscretization of the compressible Euler equations - -equations = CompressibleEulerEquations1D(1.4) - -""" - initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations1D) - -A medium blast wave taken from -- Sebastian Hennemann, Gregor J. 
Gassner (2020) - A provably entropy stable subcell shock capturing approach for high order split form DG - [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044) -""" -function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations1D) - # Modified From Hennemann & Gassner JCP paper 2020 (Sec. 6.3) -> "medium blast wave" - # Set up polar coordinates - inicenter = SVector(0.0) - x_norm = x[1] - inicenter[1] - r = abs(x_norm) - # The following code is equivalent to - # phi = atan(0.0, x_norm) - # cos_phi = cos(phi) - # in 1D but faster - cos_phi = x_norm > 0 ? one(x_norm) : -one(x_norm) - - # Calculate primitive variables - rho = r > 0.5 ? 1.0 : 1.1691 - v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi - p = r > 0.5 ? 1.0E-3 : 1.245 - - return prim2cons(SVector(rho, v1, p), equations) -end -initial_condition = initial_condition_blast_wave - -surface_flux = flux_lax_friedrichs -volume_flux = flux_chandrashekar -basis = LobattoLegendreBasis(3) -indicator_sc = IndicatorNeuralNetwork(equations, basis, - indicator_type = NeuralNetworkRayHesthaven(), - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = false, - alpha_amr = false, - variable = density_pressure, - network = model1d) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) -solver = DGSEM(basis, surface_flux, volume_integral) - -coordinates_min = (-2.0,) -coordinates_max = (2.0,) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 10_000) - -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 12.5) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 100 - -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true, - solution_variables = cons2prim) - -stepsize_callback = StepsizeCallback(cfl = 0.5) - -callbacks = CallbackSet(summary_callback, - analysis_callback, alive_callback, - save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_cnn.jl b/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_cnn.jl deleted file mode 100644 index 79d7474dc66..00000000000 --- a/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_cnn.jl +++ /dev/null @@ -1,107 +0,0 @@ -using Downloads: download -using Flux -using BSON: load -network = joinpath(@__DIR__, "modelcnn-0.964-0.001.bson") -download("https://github.com/trixi-framework/Trixi_IndicatorNeuralNetwork_networks/raw/main/networks/modelcnn-0.964-0.001.bson", - network) -model2dcnn = load(network, @__MODULE__)[:model2dcnn] - -using OrdinaryDiffEq -using Trixi - -# This elixir was one of the setups used in the following master thesis: -# - Julia Odenthal (2021) -# Shock capturing with artificial neural networks -# 
University of Cologne, advisors: Gregor Gassner, Michael Schlottke-Lakemper -# This motivates the particular choice of fluxes, mesh resolution etc. - -############################################################################### -# semidiscretization of the compressible Euler equations - -equations = CompressibleEulerEquations2D(1.4) - -""" - initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D) - -A medium blast wave taken from -- Sebastian Hennemann, Gregor J. Gassner (2020) - A provably entropy stable subcell shock capturing approach for high order split form DG - [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044) -""" -function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D) - # Modified From Hennemann & Gassner JCP paper 2020 (Sec. 6.3) -> "medium blast wave" - # Set up polar coordinates - inicenter = SVector(0.0, 0.0) - x_norm = x[1] - inicenter[1] - y_norm = x[2] - inicenter[2] - r = sqrt(x_norm^2 + y_norm^2) - phi = atan(y_norm, x_norm) - sin_phi, cos_phi = sincos(phi) - - # Calculate primitive variables - rho = r > 0.5 ? 1.0 : 1.1691 - v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi - v2 = r > 0.5 ? 0.0 : 0.1882 * sin_phi - p = r > 0.5 ? 1.0E-3 : 1.245 - - return prim2cons(SVector(rho, v1, v2, p), equations) -end -initial_condition = initial_condition_blast_wave - -surface_flux = flux_lax_friedrichs -volume_flux = flux_chandrashekar -basis = LobattoLegendreBasis(3) -indicator_sc = IndicatorNeuralNetwork(equations, basis, - indicator_type = NeuralNetworkCNN(), - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = true, - alpha_amr = false, - variable = density_pressure, - network = model2dcnn) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) -solver = DGSEM(basis, surface_flux, volume_integral) - -coordinates_min = (-2.0, -2.0) -coordinates_max = (2.0, 2.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 10_000) - -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. 
- -tspan = (0.0, 12.5) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 100 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true, - solution_variables = cons2prim) - -stepsize_callback = StepsizeCallback(cfl = 0.9) - -callbacks = CallbackSet(summary_callback, - analysis_callback, alive_callback, - save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl b/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl deleted file mode 100644 index 27398593efd..00000000000 --- a/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl +++ /dev/null @@ -1,107 +0,0 @@ -using Downloads: download -using Flux -using BSON: load -network = joinpath(@__DIR__, "modelnnpp-0.904-0.0005.bson") -download("https://github.com/trixi-framework/Trixi_IndicatorNeuralNetwork_networks/raw/main/networks/modelnnpp-0.904-0.0005.bson", - network) -model2d = load(network, @__MODULE__)[:model2d] - -using OrdinaryDiffEq -using Trixi - -# This elixir was one of the setups used in the following master thesis: -# - Julia Odenthal (2021) -# Shock capturing with artificial neural networks -# University of Cologne, advisors: Gregor Gassner, Michael Schlottke-Lakemper -# This motivates the particular choice of fluxes, mesh resolution etc. - -############################################################################### -# semidiscretization of the compressible Euler equations - -equations = CompressibleEulerEquations2D(1.4) - -""" - initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D) - -A medium blast wave taken from -- Sebastian Hennemann, Gregor J. Gassner (2020) - A provably entropy stable subcell shock capturing approach for high order split form DG - [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044) -""" -function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D) - # Modified From Hennemann & Gassner JCP paper 2020 (Sec. 6.3) -> "medium blast wave" - # Set up polar coordinates - inicenter = SVector(0.0, 0.0) - x_norm = x[1] - inicenter[1] - y_norm = x[2] - inicenter[2] - r = sqrt(x_norm^2 + y_norm^2) - phi = atan(y_norm, x_norm) - sin_phi, cos_phi = sincos(phi) - - # Calculate primitive variables - rho = r > 0.5 ? 1.0 : 1.1691 - v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi - v2 = r > 0.5 ? 0.0 : 0.1882 * sin_phi - p = r > 0.5 ? 
1.0E-3 : 1.245 - - return prim2cons(SVector(rho, v1, v2, p), equations) -end -initial_condition = initial_condition_blast_wave - -surface_flux = flux_lax_friedrichs -volume_flux = flux_chandrashekar -basis = LobattoLegendreBasis(3) -indicator_sc = IndicatorNeuralNetwork(equations, basis, - indicator_type = NeuralNetworkPerssonPeraire(), - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = true, - alpha_amr = false, - variable = density_pressure, - network = model2d) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) -solver = DGSEM(basis, surface_flux, volume_integral) - -coordinates_min = (-2.0, -2.0) -coordinates_max = (2.0, 2.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 10_000) - -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 12.5) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 100 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true, - solution_variables = cons2prim) - -stepsize_callback = StepsizeCallback(cfl = 0.9) - -callbacks = CallbackSet(summary_callback, - analysis_callback, alive_callback, - save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl b/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl deleted file mode 100644 index 6c67f948636..00000000000 --- a/examples/tree_2d_dgsem/elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl +++ /dev/null @@ -1,110 +0,0 @@ -using Downloads: download -using Flux -using Random -using BSON: load -network = joinpath(@__DIR__, "modelnnrhs-0.973-0.001.bson") -download("https://github.com/trixi-framework/Trixi_IndicatorNeuralNetwork_networks/raw/main/networks/modelnnrhs-0.973-0.001.bson", - network) -model2d = load(network, @__MODULE__)[:model2d] - -using OrdinaryDiffEq -using Trixi - -# This elixir was one of the setups used in the following master thesis: -# - Julia Odenthal (2021) -# Shock capturing with artificial neural networks -# University of Cologne, advisors: Gregor Gassner, Michael Schlottke-Lakemper -# This motivates the particular choice of fluxes, mesh resolution etc. - -############################################################################### -# semidiscretization of the compressible Euler equations - -equations = CompressibleEulerEquations2D(1.4) - -""" - initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D) - -A medium blast wave taken from -- Sebastian Hennemann, Gregor J. 
Gassner (2020) - A provably entropy stable subcell shock capturing approach for high order split form DG - [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044) -""" -function initial_condition_blast_wave(x, t, equations::CompressibleEulerEquations2D) - # Modified From Hennemann & Gassner JCP paper 2020 (Sec. 6.3) -> "medium blast wave" - # Set up polar coordinates - inicenter = SVector(0.0, 0.0) - x_norm = x[1] - inicenter[1] - y_norm = x[2] - inicenter[2] - r = sqrt(x_norm^2 + y_norm^2) - phi = atan(y_norm, x_norm) - sin_phi, cos_phi = sincos(phi) - - # Calculate primitive variables - rho = r > 0.5 ? 1.0 : 1.1691 - v1 = r > 0.5 ? 0.0 : 0.1882 * cos_phi - v2 = r > 0.5 ? 0.0 : 0.1882 * sin_phi - p = r > 0.5 ? 1.0E-3 : 1.245 - - return prim2cons(SVector(rho, v1, v2, p), equations) -end -initial_condition = initial_condition_blast_wave - -surface_flux = flux_lax_friedrichs -volume_flux = flux_chandrashekar -basis = LobattoLegendreBasis(3) -indicator_sc = IndicatorNeuralNetwork(equations, basis, - indicator_type = NeuralNetworkRayHesthaven(), - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = true, - alpha_amr = false, - variable = density_pressure, - network = model2d) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) -solver = DGSEM(basis, surface_flux, volume_integral) - -coordinates_min = (-2.0, -2.0) -coordinates_max = (2.0, 2.0) -refinement_patches = () # To allow for specifying them via `trixi_include` -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - refinement_patches = refinement_patches, - n_cells_max = 10_000) - -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. 
- -tspan = (0.0, 12.5) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 100 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true, - solution_variables = cons2prim) - -stepsize_callback = StepsizeCallback(cfl = 0.9) - -callbacks = CallbackSet(summary_callback, - analysis_callback, alive_callback, - save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_amr_neuralnetwork_perssonperaire.jl b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_amr_neuralnetwork_perssonperaire.jl deleted file mode 100644 index d2cab04223e..00000000000 --- a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_amr_neuralnetwork_perssonperaire.jl +++ /dev/null @@ -1,134 +0,0 @@ -using Downloads: download -using Flux -using BSON: load -network = joinpath(@__DIR__, "modelnnpp-0.904-0.0005.bson") -download("https://github.com/trixi-framework/Trixi_IndicatorNeuralNetwork_networks/raw/main/networks/modelnnpp-0.904-0.0005.bson", - network) -model2d = load(network, @__MODULE__)[:model2d] - -using Random: seed! -seed!(0) - -using OrdinaryDiffEq -using Trixi - -# This elixir was one of the setups used in the following master thesis: -# - Julia Odenthal (2021) -# Shock capturing with artificial neural networks -# University of Cologne, advisors: Gregor Gassner, Michael Schlottke-Lakemper -# This motivates the particular choice of fluxes, mesh resolution etc. - -############################################################################### -# semidiscretization of the compressible Euler equations -gamma = 1.4 -equations = CompressibleEulerEquations2D(gamma) - -""" - initial_condition_kelvin_helmholtz_instability(x, t, equations::CompressibleEulerEquations2D) - -A version of the classical Kelvin-Helmholtz instability based on -https://rsaa.anu.edu.au/research/established-projects/fyris/2-d-kelvin-helmholtz-test. 
-""" -function initial_condition_kelvin_helmholtz_instability(x, t, - equations::CompressibleEulerEquations2D) - # change discontinuity to tanh - # typical resolution 128^2, 256^2 - # domain size is [-0.5,0.5]^2 - dens0 = 1.0 # outside density - dens1 = 2.0 # inside density - velx0 = -0.5 # outside velocity - velx1 = 0.5 # inside velocity - slope = 50 # used for tanh instead of discontinuous initial condition - # pressure equilibrium - p = 2.5 - # y velocity v2 is only white noise - v2 = 0.01 * (rand(Float64, 1)[1] - 0.5) - # density - rho = dens0 + - (dens1 - dens0) * 0.5 * - (1 + (tanh(slope * (x[2] + 0.25)) - (tanh(slope * (x[2] - 0.25)) + 1))) - # x velocity is also augmented with noise - v1 = velx0 + - (velx1 - velx0) * 0.5 * - (1 + (tanh(slope * (x[2] + 0.25)) - (tanh(slope * (x[2] - 0.25)) + 1))) + - 0.01 * (rand(Float64, 1)[1] - 0.5) - return prim2cons(SVector(rho, v1, v2, p), equations) -end -initial_condition = initial_condition_kelvin_helmholtz_instability - -surface_flux = flux_lax_friedrichs -volume_flux = flux_chandrashekar -polydeg = 3 -basis = LobattoLegendreBasis(polydeg) -indicator_sc = IndicatorNeuralNetwork(equations, basis, - indicator_type = NeuralNetworkPerssonPeraire(), - alpha_max = 0.002, - alpha_min = 0.0001, - alpha_smooth = true, - alpha_continuous = true, - alpha_amr = false, - variable = density_pressure, - network = model2d) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) -solver = DGSEM(basis, surface_flux, volume_integral) - -coordinates_min = (-0.5, -0.5) -coordinates_max = (0.5, 0.5) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 100_000) - -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. 
- -tspan = (0.0, 5.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 100 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true, - solution_variables = cons2prim) - -amr_indicator = IndicatorNeuralNetwork(semi, - indicator_type = NeuralNetworkPerssonPeraire(), - alpha_max = 1.0, - alpha_min = 0.0001, - alpha_smooth = false, - alpha_continuous = true, - alpha_amr = true, - variable = density_pressure, - network = model2d) -amr_controller = ControllerThreeLevel(semi, amr_indicator, - base_level = 4, - med_level = 6, med_threshold = 0.3, # med_level = current level - max_level = 7, max_threshold = 0.5) -amr_callback = AMRCallback(semi, amr_controller, - interval = 1, - adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) - -stepsize_callback = StepsizeCallback(cfl = 1.3) - -callbacks = CallbackSet(summary_callback, - analysis_callback, alive_callback, - save_solution, - amr_callback, stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_neuralnetwork_perssonperaire.jl b/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_neuralnetwork_perssonperaire.jl deleted file mode 100644 index 39ea947f872..00000000000 --- a/examples/tree_2d_dgsem/elixir_euler_sedov_blast_wave_neuralnetwork_perssonperaire.jl +++ /dev/null @@ -1,126 +0,0 @@ -using Downloads: download -using Flux -using BSON: load -network = joinpath(@__DIR__, "modelnnpp-0.904-0.0005.bson") -download("https://github.com/trixi-framework/Trixi_IndicatorNeuralNetwork_networks/raw/main/networks/modelnnpp-0.904-0.0005.bson", - network) -model2d = load(network, @__MODULE__)[:model2d] - -using OrdinaryDiffEq -using Trixi - -# This elixir was one of the setups used in the following master thesis: -# - Julia Odenthal (2021) -# Shock capturing with artificial neural networks -# University of Cologne, advisors: Gregor Gassner, Michael Schlottke-Lakemper -# This motivates the particular choice of fluxes, mesh resolution etc. 
- -############################################################################### -# semidiscretization of the compressible Euler equations -gamma = 1.4 -equations = CompressibleEulerEquations2D(gamma) - -""" - initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEquations2D) - -The Sedov blast wave setup based on Flash -- https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000 -""" -function initial_condition_sedov_blast_wave(x, t, equations::CompressibleEulerEquations2D) - # Set up polar coordinates - inicenter = SVector(0.0, 0.0) - x_norm = x[1] - inicenter[1] - y_norm = x[2] - inicenter[2] - r = sqrt(x_norm^2 + y_norm^2) - - # Setup based on https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000 - r0 = 0.21875 # = 3.5 * smallest dx (for domain length=4 and max-ref=6) - # r0 = 0.5 # = more reasonable setup - E = 1.0 - p0_inner = 3 * (equations.gamma - 1) * E / (3 * pi * r0^2) - p0_outer = 1.0e-5 # = true Sedov setup - # p0_outer = 1.0e-3 # = more reasonable setup - - # Calculate primitive variables - rho = 1.0 - v1 = 0.0 - v2 = 0.0 - p = r > r0 ? p0_outer : p0_inner - - return prim2cons(SVector(rho, v1, v2, p), equations) -end -initial_condition = initial_condition_sedov_blast_wave - -surface_flux = flux_lax_friedrichs -volume_flux = flux_chandrashekar -basis = LobattoLegendreBasis(3) -indicator_sc = IndicatorNeuralNetwork(equations, basis, - indicator_type = NeuralNetworkPerssonPeraire(), - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = true, - alpha_amr = false, - variable = density_pressure, - network = model2d) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) -solver = DGSEM(basis, surface_flux, volume_integral) - -coordinates_min = (-2.0, -2.0) -coordinates_max = (2.0, 2.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 100_000) - -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. 
- -tspan = (0.0, 12.5) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 100 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true, - solution_variables = cons2prim) - -amr_indicator = IndicatorNeuralNetwork(semi, - indicator_type = NeuralNetworkPerssonPeraire(), - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = true, - alpha_amr = true, - variable = density_pressure, - network = model2d) -amr_controller = ControllerThreeLevel(semi, amr_indicator, - base_level = 4, - max_level = 6, max_threshold = 0.22) -amr_callback = AMRCallback(semi, amr_controller, - interval = 5, - adapt_initial_condition = true, - adapt_initial_condition_only_refine = true) - -stepsize_callback = StepsizeCallback(cfl = 0.9) - -callbacks = CallbackSet(summary_callback, - analysis_callback, alive_callback, - save_solution, - amr_callback, stepsize_callback) -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/src/Trixi.jl b/src/Trixi.jl index 97d518d5b78..79810186d4d 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -260,9 +260,7 @@ export load_mesh, load_time, load_timestep, load_timestep!, load_dt, load_adaptive_time_integrator! export ControllerThreeLevel, ControllerThreeLevelCombined, - IndicatorLöhner, IndicatorLoehner, IndicatorMax, - IndicatorNeuralNetwork, NeuralNetworkPerssonPeraire, NeuralNetworkRayHesthaven, - NeuralNetworkCNN + IndicatorLöhner, IndicatorLoehner, IndicatorMax # TODO: TrixiShallowWater: move new limiter export PositivityPreservingLimiterZhangShu, PositivityPreservingLimiterShallowWater @@ -303,10 +301,6 @@ function __init__() end end - @require Flux="587475ba-b771-5e3f-ad9e-33799f191a9c" begin - using .Flux: params - end - # FIXME upstream. This is a hacky workaround for # https://github.com/trixi-framework/Trixi.jl/issues/628 # https://github.com/trixi-framework/Trixi.jl/issues/1185 diff --git a/src/solvers/dgsem_tree/indicators.jl b/src/solvers/dgsem_tree/indicators.jl index 4b83e9c1a9e..bb9109f2762 100644 --- a/src/solvers/dgsem_tree/indicators.jl +++ b/src/solvers/dgsem_tree/indicators.jl @@ -332,199 +332,4 @@ function Base.show(io::IO, ::MIME"text/plain", indicator::IndicatorMax) summary_box(io, "IndicatorMax", setup) end end - -""" - IndicatorNeuralNetwork - -Artificial neural network based indicator used for shock-capturing or AMR. -Depending on the indicator_type, different input values and corresponding trained networks are used. - -`indicator_type = NeuralNetworkPerssonPeraire()` -- Input: The energies in lower modes as well as nnodes(dg). - -`indicator_type = NeuralNetworkRayHesthaven()` -- 1d Input: Cell average of the cell and its neighboring cells as well as the interface values. -- 2d Input: Linear modal values of the cell and its neighboring cells. 
- -- Ray, Hesthaven (2018) - "An artificial neural network as a troubled-cell indicator" - [doi:10.1016/j.jcp.2018.04.029](https://doi.org/10.1016/j.jcp.2018.04.029) -- Ray, Hesthaven (2019) - "Detecting troubled-cells on two-dimensional unstructured grids using a neural network" - [doi:10.1016/j.jcp.2019.07.043](https://doi.org/10.1016/j.jcp.2019.07.043) - -`indicator_type = CNN (Only in 2d)` -- Based on convolutional neural network. -- 2d Input: Interpolation of the nodal values of the `indicator.variable` to the 4x4 LGL nodes. - -If `alpha_continuous == true` the continuous network output for troubled cells (`alpha > 0.5`) is considered. -If the cells are good (`alpha < 0.5`), `alpha` is set to `0`. -If `alpha_continuous == false`, the blending factor is set to `alpha = 0` for good cells and -`alpha = 1` for troubled cells. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. - -""" -struct IndicatorNeuralNetwork{IndicatorType, RealT <: Real, Variable, Chain, Cache} <: - AbstractIndicator - indicator_type::IndicatorType - alpha_max::RealT - alpha_min::RealT - alpha_smooth::Bool - alpha_continuous::Bool - alpha_amr::Bool - variable::Variable - network::Chain - cache::Cache -end - -# this method is used when the indicator is constructed as for shock-capturing volume integrals -function IndicatorNeuralNetwork(equations::AbstractEquations, basis; - indicator_type, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = true, - alpha_amr = false, - variable, - network) - alpha_max, alpha_min = promote(alpha_max, alpha_min) - IndicatorType = typeof(indicator_type) - cache = create_cache(IndicatorNeuralNetwork{IndicatorType}, equations, basis) - IndicatorNeuralNetwork{IndicatorType, typeof(alpha_max), typeof(variable), - typeof(network), typeof(cache)}(indicator_type, alpha_max, - alpha_min, alpha_smooth, - alpha_continuous, alpha_amr, - variable, - network, cache) -end - -# this method is used when the indicator is constructed as for AMR -function IndicatorNeuralNetwork(semi::AbstractSemidiscretization; - indicator_type, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - alpha_continuous = true, - alpha_amr = true, - variable, - network) - alpha_max, alpha_min = promote(alpha_max, alpha_min) - IndicatorType = typeof(indicator_type) - cache = create_cache(IndicatorNeuralNetwork{IndicatorType}, semi) - IndicatorNeuralNetwork{IndicatorType, typeof(alpha_max), typeof(variable), - typeof(network), typeof(cache)}(indicator_type, alpha_max, - alpha_min, alpha_smooth, - alpha_continuous, alpha_amr, - variable, - network, cache) -end - -function Base.show(io::IO, indicator::IndicatorNeuralNetwork) - @nospecialize indicator # reduce precompilation time - - print(io, "IndicatorNeuralNetwork(") - print(io, indicator.indicator_type) - print(io, ", alpha_max=", indicator.alpha_max) - print(io, ", alpha_min=", indicator.alpha_min) - print(io, ", alpha_smooth=", indicator.alpha_smooth) - print(io, ", alpha_continuous=", indicator.alpha_continuous) - print(io, indicator.variable) - print(io, ")") -end - -function Base.show(io::IO, ::MIME"text/plain", indicator::IndicatorNeuralNetwork) - @nospecialize indicator # reduce precompilation time - - if get(io, :compact, false) - show(io, indicator) - else - setup = [ - "indicator type" => indicator.indicator_type, - "max. α" => indicator.alpha_max, - "min. α" => indicator.alpha_min, - "smooth α" => (indicator.alpha_smooth ? 
"yes" : "no"), - "continuous α" => (indicator.alpha_continuous ? "yes" : "no"), - "indicator variable" => indicator.variable, - ] - summary_box(io, "IndicatorNeuralNetwork", setup) - end -end - -# Convert probability for troubled cell to indicator value for shockcapturing/AMR -@inline function probability_to_indicator(probability_troubled_cell, alpha_continuous, - alpha_amr, - alpha_min, alpha_max) - # Initialize indicator to zero - alpha_element = zero(probability_troubled_cell) - - if alpha_continuous && !alpha_amr - # Set good cells to 0 and troubled cells to continuous value of the network prediction - if probability_troubled_cell > 0.5 - alpha_element = probability_troubled_cell - else - alpha_element = zero(probability_troubled_cell) - end - - # Take care of the case close to pure FV - if alpha_element > 1 - alpha_min - alpha_element = one(alpha_element) - end - - # Scale the probability for a troubled cell (in [0,1]) to the maximum allowed alpha - alpha_element *= alpha_max - elseif !alpha_continuous && !alpha_amr - # Set good cells to 0 and troubled cells to 1 - if probability_troubled_cell > 0.5 - alpha_element = alpha_max - else - alpha_element = zero(alpha_max) - end - elseif alpha_amr - # The entire continuous output of the neural network is used for AMR - alpha_element = probability_troubled_cell - - # Scale the probability for a troubled cell (in [0,1]) to the maximum allowed alpha - alpha_element *= alpha_max - end - - return alpha_element -end - -""" - NeuralNetworkPerssonPeraire - -Indicator type for creating an `IndicatorNeuralNetwork` indicator. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. - -See also: [`IndicatorNeuralNetwork`](@ref) -""" -struct NeuralNetworkPerssonPeraire end - -""" - NeuralNetworkRayHesthaven - -Indicator type for creating an `IndicatorNeuralNetwork` indicator. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. - -See also: [`IndicatorNeuralNetwork`](@ref) -""" -struct NeuralNetworkRayHesthaven end - -""" - NeuralNetworkCNN - -Indicator type for creating an `IndicatorNeuralNetwork` indicator. - -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. 
- -See also: [`IndicatorNeuralNetwork`](@ref) -""" -struct NeuralNetworkCNN end end # @muladd diff --git a/src/solvers/dgsem_tree/indicators_1d.jl b/src/solvers/dgsem_tree/indicators_1d.jl index 8b57348861c..763a6b5661c 100644 --- a/src/solvers/dgsem_tree/indicators_1d.jl +++ b/src/solvers/dgsem_tree/indicators_1d.jl @@ -304,222 +304,4 @@ function (indicator_max::IndicatorMax)(u::AbstractArray{<:Any, 3}, return alpha end - -# this method is used when the indicator is constructed as for shock-capturing volume integrals -# empty cache is default -function create_cache(::Type{<:IndicatorNeuralNetwork}, - equations::AbstractEquations{1}, basis::LobattoLegendreBasis) - return NamedTuple() -end - -# cache for NeuralNetworkPerssonPeraire-type indicator -function create_cache(::Type{IndicatorNeuralNetwork{NeuralNetworkPerssonPeraire}}, - equations::AbstractEquations{1}, basis::LobattoLegendreBasis) - alpha = Vector{real(basis)}() - alpha_tmp = similar(alpha) - A = Array{real(basis), ndims(equations)} - - prototype = A(undef, nnodes(basis)) - indicator_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - modal_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - - return (; alpha, alpha_tmp, indicator_threaded, modal_threaded) -end - -# cache for NeuralNetworkRayHesthaven-type indicator -function create_cache(::Type{IndicatorNeuralNetwork{NeuralNetworkRayHesthaven}}, - equations::AbstractEquations{1}, basis::LobattoLegendreBasis) - alpha = Vector{real(basis)}() - alpha_tmp = similar(alpha) - A = Array{real(basis), ndims(equations)} - - prototype = A(undef, nnodes(basis)) - indicator_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - neighbor_ids = Vector{Int}(undef, 2) - - return (; alpha, alpha_tmp, indicator_threaded, neighbor_ids) -end - -# this method is used when the indicator is constructed as for AMR -function create_cache(typ::Type{<:IndicatorNeuralNetwork}, - mesh, equations::AbstractEquations{1}, dg::DGSEM, cache) - create_cache(typ, equations, dg.basis) -end - -function (indicator_ann::IndicatorNeuralNetwork{NeuralNetworkPerssonPeraire})(u::AbstractArray{ - <:Any, - 3 - }, - mesh, - equations, - dg::DGSEM, - cache; - kwargs...) - @unpack indicator_type, alpha_max, alpha_min, alpha_smooth, alpha_continuous, alpha_amr, variable, network = indicator_ann - - @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded = indicator_ann.cache - # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR? - # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)` - # or just `resize!` whenever we call the relevant methods as we do now? 
- resize!(alpha, nelements(dg, cache)) - if alpha_smooth - resize!(alpha_tmp, nelements(dg, cache)) - end - - @threaded for element in eachelement(dg, cache) - indicator = indicator_threaded[Threads.threadid()] - modal = modal_threaded[Threads.threadid()] - - # Calculate indicator variables at Gauss-Lobatto nodes - for i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, element) - indicator[i] = indicator_ann.variable(u_local, equations) - end - - # Convert to modal representation - multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre, - indicator) - - # Calculate total energies for all modes, without highest, without two highest - total_energy = zero(eltype(modal)) - for i in 1:nnodes(dg) - total_energy += modal[i]^2 - end - total_energy_clip1 = zero(eltype(modal)) - for i in 1:(nnodes(dg) - 1) - total_energy_clip1 += modal[i]^2 - end - total_energy_clip2 = zero(eltype(modal)) - for i in 1:(nnodes(dg) - 2) - total_energy_clip2 += modal[i]^2 - end - - # Calculate energy in highest modes - X1 = (total_energy - total_energy_clip1) / total_energy - X2 = (total_energy_clip1 - total_energy_clip2) / total_energy_clip1 - - # There are two versions of the network: - # The first one only takes the highest energy modes as input, the second one also the number of - # nodes. Automatically use the correct input by checking the number of inputs of the network. - if size(params(network)[1], 2) == 2 - network_input = SVector(X1, X2) - elseif size(params(network)[1], 2) == 3 - network_input = SVector(X1, X2, nnodes(dg)) - end - - # Scale input data - network_input = network_input / - max(maximum(abs, network_input), one(eltype(network_input))) - probability_troubled_cell = network(network_input)[1] - - # Compute indicator value - alpha[element] = probability_to_indicator(probability_troubled_cell, - alpha_continuous, - alpha_amr, alpha_min, alpha_max) - end - - if alpha_smooth - apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache) - end - - return alpha -end - -function (indicator_ann::IndicatorNeuralNetwork{NeuralNetworkRayHesthaven})(u::AbstractArray{ - <:Any, - 3 - }, - mesh, - equations, - dg::DGSEM, - cache; - kwargs...) - @unpack indicator_type, alpha_max, alpha_min, alpha_smooth, alpha_continuous, alpha_amr, variable, network = indicator_ann - - @unpack alpha, alpha_tmp, indicator_threaded, neighbor_ids = indicator_ann.cache - # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR? - # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)` - # or just `resize!` whenever we call the relevant methods as we do now? 
- resize!(alpha, nelements(dg, cache)) - if alpha_smooth - resize!(alpha_tmp, nelements(dg, cache)) - end - - c2e = zeros(Int, length(mesh.tree)) - for element in eachelement(dg, cache) - c2e[cache.elements.cell_ids[element]] = element - end - - @threaded for element in eachelement(dg, cache) - indicator = indicator_threaded[Threads.threadid()] - cell_id = cache.elements.cell_ids[element] - - for direction in eachdirection(mesh.tree) - if !has_any_neighbor(mesh.tree, cell_id, direction) - neighbor_ids[direction] = element - continue - end - if has_neighbor(mesh.tree, cell_id, direction) - neighbor_cell_id = mesh.tree.neighbor_ids[direction, cell_id] - if has_children(mesh.tree, neighbor_cell_id) # Cell has small neighbor - if direction == 1 - neighbor_ids[direction] = c2e[mesh.tree.child_ids[2, - neighbor_cell_id]] - else - neighbor_ids[direction] = c2e[mesh.tree.child_ids[1, - neighbor_cell_id]] - end - else # Cell has same refinement level neighbor - neighbor_ids[direction] = c2e[neighbor_cell_id] - end - else # Cell is small and has large neighbor - parent_id = mesh.tree.parent_ids[cell_id] - neighbor_cell_id = mesh.tree.neighbor_ids[direction, parent_id] - neighbor_ids[direction] = c2e[neighbor_cell_id] - end - end - - # Calculate indicator variables at Gauss-Lobatto nodes - for i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, element) - indicator[i] = indicator_ann.variable(u_local, equations) - end - - # Cell average and interface values of the cell - X2 = sum(indicator) / nnodes(dg) - X4 = indicator[1] - X5 = indicator[end] - - # Calculate indicator variables from left neighboring cell at Gauss-Lobatto nodes - for i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, neighbor_ids[1]) - indicator[i] = indicator_ann.variable(u_local, equations) - end - X1 = sum(indicator) / nnodes(dg) - - # Calculate indicator variables from right neighboring cell at Gauss-Lobatto nodes - for i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, neighbor_ids[2]) - indicator[i] = indicator_ann.variable(u_local, equations) - end - X3 = sum(indicator) / nnodes(dg) - network_input = SVector(X1, X2, X3, X4, X5) - - # Scale input data - network_input = network_input / - max(maximum(abs, network_input), one(eltype(network_input))) - probability_troubled_cell = network(network_input)[1] - - # Compute indicator value - alpha[element] = probability_to_indicator(probability_troubled_cell, - alpha_continuous, - alpha_amr, alpha_min, alpha_max) - end - - if alpha_smooth - apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache) - end - - return alpha -end end # @muladd diff --git a/src/solvers/dgsem_tree/indicators_2d.jl b/src/solvers/dgsem_tree/indicators_2d.jl index 2f34e0eb661..3f4fb90683b 100644 --- a/src/solvers/dgsem_tree/indicators_2d.jl +++ b/src/solvers/dgsem_tree/indicators_2d.jl @@ -338,355 +338,4 @@ function (indicator_max::IndicatorMax)(u::AbstractArray{<:Any, 4}, return alpha end - -# this method is used when the indicator is constructed as for shock-capturing volume integrals -# empty cache is default -function create_cache(::Type{IndicatorNeuralNetwork}, - equations::AbstractEquations{2}, basis::LobattoLegendreBasis) - return NamedTuple() -end - -# cache for NeuralNetworkPerssonPeraire-type indicator -function create_cache(::Type{IndicatorNeuralNetwork{NeuralNetworkPerssonPeraire}}, - equations::AbstractEquations{2}, basis::LobattoLegendreBasis) - alpha = Vector{real(basis)}() - alpha_tmp = similar(alpha) - A = Array{real(basis), ndims(equations)} - - @assert 
nnodes(basis)>=4 "Indicator only works for nnodes >= 4 (polydeg > 2)" - - prototype = A(undef, nnodes(basis), nnodes(basis)) - indicator_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - modal_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - modal_tmp1_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - - return (; alpha, alpha_tmp, indicator_threaded, modal_threaded, modal_tmp1_threaded) -end - -# cache for NeuralNetworkRayHesthaven-type indicator -function create_cache(::Type{IndicatorNeuralNetwork{NeuralNetworkRayHesthaven}}, - equations::AbstractEquations{2}, basis::LobattoLegendreBasis) - alpha = Vector{real(basis)}() - alpha_tmp = similar(alpha) - A = Array{real(basis), ndims(equations)} - - prototype = A(undef, nnodes(basis), nnodes(basis)) - indicator_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - modal_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - modal_tmp1_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - - network_input = Vector{Float64}(undef, 15) - neighbor_ids = Array{Int64}(undef, 8) - neighbor_mean = Array{Float64}(undef, 4, 3) - - return (; alpha, alpha_tmp, indicator_threaded, modal_threaded, modal_tmp1_threaded, - network_input, neighbor_ids, neighbor_mean) -end - -# cache for NeuralNetworkCNN-type indicator -function create_cache(::Type{IndicatorNeuralNetwork{NeuralNetworkCNN}}, - equations::AbstractEquations{2}, basis::LobattoLegendreBasis) - alpha = Vector{real(basis)}() - alpha_tmp = similar(alpha) - A = Array{real(basis), ndims(equations)} - - prototype = A(undef, nnodes(basis), nnodes(basis)) - indicator_threaded = [similar(prototype) for _ in 1:Threads.nthreads()] - n_cnn = 4 - nodes, _ = gauss_lobatto_nodes_weights(nnodes(basis)) - cnn_nodes, _ = gauss_lobatto_nodes_weights(n_cnn) - vandermonde = polynomial_interpolation_matrix(nodes, cnn_nodes) - network_input = Array{Float32}(undef, n_cnn, n_cnn, 1, 1) - - return (; alpha, alpha_tmp, indicator_threaded, nodes, cnn_nodes, vandermonde, - network_input) -end - -# this method is used when the indicator is constructed as for AMR -function create_cache(typ::Type{<:IndicatorNeuralNetwork}, - mesh, equations::AbstractEquations{2}, dg::DGSEM, cache) - create_cache(typ, equations, dg.basis) -end - -function (indicator_ann::IndicatorNeuralNetwork{NeuralNetworkPerssonPeraire})(u, - mesh::TreeMesh{ - 2 - }, - equations, - dg::DGSEM, - cache; - kwargs...) - @unpack indicator_type, alpha_max, alpha_min, alpha_smooth, alpha_continuous, alpha_amr, variable, network = indicator_ann - - @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded, modal_tmp1_threaded = indicator_ann.cache - # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR? - # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)` - # or just `resize!` whenever we call the relevant methods as we do now? 
- resize!(alpha, nelements(dg, cache)) - if alpha_smooth - resize!(alpha_tmp, nelements(dg, cache)) - end - - @threaded for element in eachelement(dg, cache) - indicator = indicator_threaded[Threads.threadid()] - modal = modal_threaded[Threads.threadid()] - modal_tmp1 = modal_tmp1_threaded[Threads.threadid()] - - # Calculate indicator variables at Gauss-Lobatto nodes - for j in eachnode(dg), i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, j, element) - indicator[i, j] = indicator_ann.variable(u_local, equations) - end - - # Convert to modal representation - multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre, - indicator, modal_tmp1) - - # Calculate total energies for all modes, without highest, without two highest - total_energy = zero(eltype(modal)) - for j in 1:nnodes(dg), i in 1:nnodes(dg) - total_energy += modal[i, j]^2 - end - total_energy_clip1 = zero(eltype(modal)) - for j in 1:(nnodes(dg) - 1), i in 1:(nnodes(dg) - 1) - total_energy_clip1 += modal[i, j]^2 - end - total_energy_clip2 = zero(eltype(modal)) - for j in 1:(nnodes(dg) - 2), i in 1:(nnodes(dg) - 2) - total_energy_clip2 += modal[i, j]^2 - end - total_energy_clip3 = zero(eltype(modal)) - for j in 1:(nnodes(dg) - 3), i in 1:(nnodes(dg) - 3) - total_energy_clip3 += modal[i, j]^2 - end - - # Calculate energy in higher modes and polynomial degree for the network input - X1 = (total_energy - total_energy_clip1) / total_energy - X2 = (total_energy_clip1 - total_energy_clip2) / total_energy_clip1 - X3 = (total_energy_clip2 - total_energy_clip3) / total_energy_clip2 - X4 = nnodes(dg) - network_input = SVector(X1, X2, X3, X4) - - # Scale input data - network_input = network_input / - max(maximum(abs, network_input), one(eltype(network_input))) - probability_troubled_cell = network(network_input)[1] - - # Compute indicator value - alpha[element] = probability_to_indicator(probability_troubled_cell, - alpha_continuous, - alpha_amr, alpha_min, alpha_max) - end - - if alpha_smooth - apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache) - end - - return alpha -end - -function (indicator_ann::IndicatorNeuralNetwork{NeuralNetworkRayHesthaven})(u, - mesh::TreeMesh{ - 2 - }, - equations, - dg::DGSEM, - cache; - kwargs...) - @unpack indicator_type, alpha_max, alpha_min, alpha_smooth, alpha_continuous, alpha_amr, variable, network = indicator_ann - - @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded, modal_tmp1_threaded, network_input, neighbor_ids, neighbor_mean = indicator_ann.cache #X, network_input - # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR? - # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)` - # or just `resize!` whenever we call the relevant methods as we do now? 
- resize!(alpha, nelements(dg, cache)) - if alpha_smooth - resize!(alpha_tmp, nelements(dg, cache)) - end - - c2e = zeros(Int, length(mesh.tree)) - for element in eachelement(dg, cache) - c2e[cache.elements.cell_ids[element]] = element - end - - X = Array{Float64}(undef, 3, nelements(dg, cache)) - - @threaded for element in eachelement(dg, cache) - indicator = indicator_threaded[Threads.threadid()] - modal = modal_threaded[Threads.threadid()] - modal_tmp1 = modal_tmp1_threaded[Threads.threadid()] - - # Calculate indicator variables at Gauss-Lobatto nodes - for j in eachnode(dg), i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, j, element) - indicator[i, j] = indicator_ann.variable(u_local, equations) - end - - # Convert to modal representation - multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre, - indicator, modal_tmp1) - # Save linear modal coefficients for the network input - X[1, element] = modal[1, 1] - X[2, element] = modal[1, 2] - X[3, element] = modal[2, 1] - end - - @threaded for element in eachelement(dg, cache) - cell_id = cache.elements.cell_ids[element] - - network_input[1] = X[1, element] - network_input[2] = X[2, element] - network_input[3] = X[3, element] - - for direction in eachdirection(mesh.tree) - if direction == 1 # -x - dir = 4 - elseif direction == 2 # +x - dir = 1 - elseif direction == 3 # -y - dir = 3 - elseif direction == 4 # +y - dir = 2 - end - - # Of no neighbor exists and current cell is not small - if !has_any_neighbor(mesh.tree, cell_id, direction) - network_input[3 * dir + 1] = X[1, element] - network_input[3 * dir + 2] = X[2, element] - network_input[3 * dir + 3] = X[3, element] - continue - end - - # Get Input data from neighbors - if has_neighbor(mesh.tree, cell_id, direction) - neighbor_cell_id = mesh.tree.neighbor_ids[direction, cell_id] - if has_children(mesh.tree, neighbor_cell_id) # Cell has small neighbor - # Mean over 4 neighbor cells - neighbor_ids[1] = mesh.tree.child_ids[1, neighbor_cell_id] - neighbor_ids[2] = mesh.tree.child_ids[2, neighbor_cell_id] - neighbor_ids[3] = mesh.tree.child_ids[3, neighbor_cell_id] - neighbor_ids[4] = mesh.tree.child_ids[4, neighbor_cell_id] - - for i in 1:4 - if has_children(mesh.tree, neighbor_ids[i]) - neighbor_ids5 = c2e[mesh.tree.child_ids[1, neighbor_ids[i]]] - neighbor_ids6 = c2e[mesh.tree.child_ids[2, neighbor_ids[i]]] - neighbor_ids7 = c2e[mesh.tree.child_ids[3, neighbor_ids[i]]] - neighbor_ids8 = c2e[mesh.tree.child_ids[4, neighbor_ids[i]]] - - neighbor_mean[i, 1] = (X[1, neighbor_ids5] + - X[1, neighbor_ids6] + - X[1, neighbor_ids7] + - X[1, neighbor_ids8]) / 4 - neighbor_mean[i, 2] = (X[2, neighbor_ids5] + - X[2, neighbor_ids6] + - X[2, neighbor_ids7] + - X[2, neighbor_ids8]) / 4 - neighbor_mean[i, 3] = (X[3, neighbor_ids5] + - X[3, neighbor_ids6] + - X[3, neighbor_ids7] + - X[3, neighbor_ids8]) / 4 - else - neighbor_id = c2e[neighbor_ids[i]] - neighbor_mean[i, 1] = X[1, neighbor_id] - neighbor_mean[i, 2] = X[2, neighbor_id] - neighbor_mean[i, 3] = X[3, neighbor_id] - end - end - network_input[3 * dir + 1] = (neighbor_mean[1, 1] + - neighbor_mean[2, 1] + - neighbor_mean[3, 1] + - neighbor_mean[4, 1]) / 4 - network_input[3 * dir + 2] = (neighbor_mean[1, 2] + - neighbor_mean[2, 2] + - neighbor_mean[3, 2] + - neighbor_mean[4, 2]) / 4 - network_input[3 * dir + 3] = (neighbor_mean[1, 3] + - neighbor_mean[2, 3] + - neighbor_mean[3, 3] + - neighbor_mean[4, 3]) / 4 - - else # Cell has same refinement level neighbor - neighbor_id = c2e[neighbor_cell_id] - network_input[3 
-                    network_input[3 * dir + 2] = X[2, neighbor_id]
-                    network_input[3 * dir + 3] = X[3, neighbor_id]
-                end
-            else # Cell is small and has large neighbor
-                parent_id = mesh.tree.parent_ids[cell_id]
-                neighbor_id = c2e[mesh.tree.neighbor_ids[direction, parent_id]]
-
-                network_input[3 * dir + 1] = X[1, neighbor_id]
-                network_input[3 * dir + 2] = X[2, neighbor_id]
-                network_input[3 * dir + 3] = X[3, neighbor_id]
-            end
-        end
-
-        # Scale input data
-        network_input = network_input /
-                        max(maximum(abs, network_input), one(eltype(network_input)))
-        probability_troubled_cell = network(network_input)[1]
-
-        # Compute indicator value
-        alpha[element] = probability_to_indicator(probability_troubled_cell,
-                                                  alpha_continuous,
-                                                  alpha_amr, alpha_min, alpha_max)
-    end
-
-    if alpha_smooth
-        apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache)
-    end
-
-    return alpha
-end
-
-function (indicator_ann::IndicatorNeuralNetwork{NeuralNetworkCNN})(u, mesh::TreeMesh{2},
-                                                                   equations, dg::DGSEM,
-                                                                   cache; kwargs...)
-    @unpack indicator_type, alpha_max, alpha_min, alpha_smooth, alpha_continuous, alpha_amr, variable, network = indicator_ann
-
-    @unpack alpha, alpha_tmp, indicator_threaded, nodes, cnn_nodes, vandermonde, network_input = indicator_ann.cache
-    # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR?
-    #       Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)`
-    #       or just `resize!` whenever we call the relevant methods as we do now?
-    resize!(alpha, nelements(dg, cache))
-    if alpha_smooth
-        resize!(alpha_tmp, nelements(dg, cache))
-    end
-
-    @threaded for element in eachelement(dg, cache)
-        indicator = indicator_threaded[Threads.threadid()]
-
-        # Calculate indicator variables at Gauss-Lobatto nodes
-        for j in eachnode(dg), i in eachnode(dg)
-            u_local = get_node_vars(u, equations, dg, i, j, element)
-            indicator[i, j] = indicator_ann.variable(u_local, equations)
-        end
-
-        # Interpolate nodal data to 4x4 LGL nodes
-        for j in 1:4, i in 1:4
-            acc = zero(eltype(indicator))
-            for jj in eachnode(dg), ii in eachnode(dg)
-                acc += vandermonde[i, ii] * indicator[ii, jj] * vandermonde[j, jj]
-            end
-            network_input[i, j, 1, 1] = acc
-        end
-
-        # Scale input data
-        network_input = network_input /
-                        max(maximum(abs, network_input), one(eltype(network_input)))
-        probability_troubled_cell = network(network_input)[1]
-
-        # Compute indicator value
-        alpha[element] = probability_to_indicator(probability_troubled_cell,
-                                                  alpha_continuous,
-                                                  alpha_amr, alpha_min, alpha_max)
-    end
-
-    if alpha_smooth
-        apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache)
-    end
-
-    return alpha
-end
 end # @muladd
diff --git a/test/Project.toml b/test/Project.toml
index c45be49a5d0..7d4f6f031b4 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -1,9 +1,7 @@
 [deps]
 Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
-BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
 CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
 Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
-Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
@@ -15,9 +13,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [compat]
 Aqua = "0.7"
-BSON = "0.3.3"
 CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10"
-Flux = "0.13.15, 0.14"
 ForwardDiff = "0.10"
 MPI = "0.20"
 OrdinaryDiffEq = "6.49.1"
diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl
index 62afc5baee3..6cd7998ab02 100644
--- a/test/test_tree_1d_euler.jl
+++ b/test/test_tree_1d_euler.jl
@@ -393,26 +393,6 @@ end
         @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000
     end
 end
-
-@trixi_testset "elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl" begin
-    @test_trixi_include(joinpath(EXAMPLES_DIR,
-                                 "elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl"),
-                        l2=[0.21814833203212694, 0.2818328665444332, 0.5528379124720818],
-                        linf=[1.5548653877320868, 1.4474018998129738, 2.071919577393772],
-                        maxiters=30)
-end
-
-@trixi_testset "elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl" begin
-    @test_trixi_include(joinpath(EXAMPLES_DIR,
-                                 "elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl"),
-                        l2=[0.22054468879127423, 0.2828269190680846, 0.5542369885642424],
-                        linf=[
-                            1.5623359741479623,
-                            1.4290121654488288,
-                            2.1040405133123072,
-                        ],
-                        maxiters=30)
-end
 end
 
 end # module
diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl
index db36cb7d79f..69824511759 100644
--- a/test/test_tree_2d_euler.jl
+++ b/test/test_tree_2d_euler.jl
@@ -260,89 +260,6 @@ end
     end
 end
 
-@trixi_testset "elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl" begin
-    @test_trixi_include(joinpath(EXAMPLES_DIR,
-                                 "elixir_euler_blast_wave_neuralnetwork_perssonperaire.jl"),
-                        l2=[
-                            0.4758794741390833,
-                            0.21045415565179362,
-                            0.21045325630191866,
-                            0.7022517958549878,
-                        ],
-                        linf=[
-                            1.710832148442441,
-                            0.9711663578827681,
-                            0.9703787873632452,
-                            2.9619758810532653,
-                        ],
-                        initial_refinement_level=4,
-                        maxiters=50)
-end
-
-@trixi_testset "elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl" begin
-    @test_trixi_include(joinpath(EXAMPLES_DIR,
-                                 "elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl"),
-                        l2=[
-                            0.472445774440313,
-                            0.2090782039442978,
-                            0.20885558673697927,
-                            0.700569533591275,
-                        ],
-                        linf=[
-                            1.7066492792835155,
-                            0.9856122336679919,
-                            0.9784316656930644,
-                            2.9372978989672873,
-                        ],
-                        initial_refinement_level=4,
-                        maxiters=50)
-end
-
-@trixi_testset "elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl with mortars" begin
-    @test_trixi_include(joinpath(EXAMPLES_DIR,
-                                 "elixir_euler_blast_wave_neuralnetwork_rayhesthaven.jl"),
-                        l2=[
-                            0.016486406327766923,
-                            0.03097329879894433,
-                            0.03101012918167401,
-                            0.15157175775429868,
-                        ],
-                        linf=[
-                            0.27688647744873407,
-                            0.5653724536715139,
-                            0.565695523611447,
-                            2.513047611639946,
-                        ],
-                        refinement_patches=((type = "box",
-                                             coordinates_min = (-0.25, -0.25),
-                                             coordinates_max = (0.25, 0.25)),
-                                            (type = "box",
-                                             coordinates_min = (-0.125, -0.125),
-                                             coordinates_max = (0.125, 0.125))),
-                        initial_refinement_level=4,
-                        maxiters=5)
-end
-
-@trixi_testset "elixir_euler_blast_wave_neuralnetwork_cnn.jl" begin
-    @test_trixi_include(joinpath(EXAMPLES_DIR,
-                                 "elixir_euler_blast_wave_neuralnetwork_cnn.jl"),
-                        l2=[
-                            0.4795795496408325,
-                            0.2125148972465021,
-                            0.21311260934645868,
-                            0.7033388737692883,
-                        ],
-                        linf=[
-                            1.8295385992182336,
-                            0.9687795218482794,
-                            0.9616033072376108,
-                            2.9513245978047133,
-                        ],
-                        initial_refinement_level=4,
-                        maxiters=50,
-                        rtol=1.0e-7)
-end
-
 @trixi_testset "elixir_euler_blast_wave_pure_fv.jl" begin
     @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_blast_wave_pure_fv.jl"),
                         l2=[
@@ -448,25 +365,6 @@ end
     end
 end
 
-@trixi_testset "elixir_euler_sedov_blast_wave_neuralnetwork_perssonperaire.jl" begin
-    @test_trixi_include(joinpath(EXAMPLES_DIR,
-                                 "elixir_euler_sedov_blast_wave_neuralnetwork_perssonperaire.jl"),
-                        l2=[
-                            0.0845430093623868,
-                            0.09271459184623232,
-                            0.09271459184623232,
-                            0.4377291875101709,
-                        ],
-                        linf=[
-                            1.3608553480069898,
-                            1.6822884847136004,
-                            1.6822884847135997,
-                            4.2201475428867035,
-                        ],
-                        maxiters=30,
-                        coverage_override=(maxiters = 6,))
-end
-
 @trixi_testset "elixir_euler_positivity.jl" begin
     @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_positivity.jl"),
                         l2=[
@@ -624,18 +522,6 @@ end
     end
 end
 
-@trixi_testset "elixir_euler_kelvin_helmholtz_instability_amr_neuralnetwork_perssonperaire.jl" begin
-    @test_trixi_include(joinpath(EXAMPLES_DIR,
-                                 "elixir_euler_kelvin_helmholtz_instability_amr_neuralnetwork_perssonperaire.jl"),
-                        # This stuff is experimental and annoying to test. In the future, we plan
-                        # to move it to another repository. Thus, we save developer time right now
-                        # and do not run these tests anymore.
-                        # l2 = [0.0009823702998067061, 0.004943231496200673, 0.0048604522073091815, 0.00496983530893294],
-                        # linf = [0.00855717053383187, 0.02087422420794427, 0.017121993783086185, 0.02720703869972585],
-                        maxiters=30,
-                        coverage_override=(maxiters = 2,))
-end
-
 @trixi_testset "elixir_euler_colliding_flow.jl" begin
     @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_colliding_flow.jl"),
                         l2=[
diff --git a/test/test_unit.jl b/test/test_unit.jl
index a73dfab5504..82108677633 100644
--- a/test/test_unit.jl
+++ b/test/test_unit.jl
@@ -427,14 +427,6 @@ end
 
     indicator_max = IndicatorMax("variable", (; cache = nothing))
     @test_nowarn show(stdout, indicator_max)
-
-    equations = CompressibleEulerEquations2D(1.4)
-    basis = LobattoLegendreBasis(3)
-    indicator_neuralnetwork = IndicatorNeuralNetwork(equations, basis,
-                                                     indicator_type = NeuralNetworkPerssonPeraire(),
-                                                     variable = density,
-                                                     network = nothing)
-    @test_nowarn show(stdout, indicator_neuralnetwork)
 end
 
 @timed_testset "LBM 2D constructor" begin