From ed2c63921ab585bbdddaad6bd5a52a26d30993b0 Mon Sep 17 00:00:00 2001 From: "Z J Wegert (Workstation)" Date: Tue, 7 May 2024 11:29:24 +1000 Subject: [PATCH] Rename package --- .gitignore | 2 +- Project.toml | 2 +- README.md | 8 +- compile/compile.jl | 4 +- compile/warmup.jl | 4 +- docs/make.jl | 6 +- docs/src/dev/shape_der.md | 12 +- docs/src/getting-started.md | 18 +-- docs/src/index.md | 10 +- docs/src/reference/benchmarking.md | 16 +-- docs/src/reference/chainrules.md | 66 +++++------ docs/src/reference/io.md | 16 +-- docs/src/reference/levelsetevolution.md | 32 +++--- docs/src/reference/optimisers.md | 26 ++--- docs/src/reference/utilities.md | 10 +- docs/src/reference/velext.md | 2 +- .../tutorials/minimum_thermal_compliance.md | 84 +++++++------- scripts/Benchmarks/benchmark.jl | 32 +++--- .../Benchmarks/generate_benchmark_scripts.jl | 12 +- scripts/MPI/3d_elastic_compliance_ALM.jl | 2 +- scripts/MPI/3d_hyperelastic_compliance_ALM.jl | 2 +- .../3d_hyperelastic_compliance_neohook_ALM.jl | 2 +- scripts/MPI/3d_inverse_homenisation_ALM.jl | 2 +- scripts/MPI/3d_inverter_ALM.jl | 2 +- scripts/MPI/3d_inverter_HPM.jl | 2 +- .../3d_nonlinear_thermal_compliance_ALM.jl | 2 +- scripts/MPI/3d_thermal_compliance_ALM.jl | 2 +- scripts/MPI/elastic_compliance_ALM.jl | 2 +- scripts/MPI/inverse_homenisation_ALM.jl | 2 +- .../MPI/nonlinear_thermal_compliance_ALM.jl | 2 +- scripts/MPI/thermal_compliance_ALM.jl | 2 +- scripts/MPI/thermal_compliance_ALM_PETSc.jl | 2 +- scripts/Serial/elastic_compliance_ALM.jl | 2 +- scripts/Serial/elastic_compliance_HPM.jl | 2 +- scripts/Serial/elastic_compliance_LM.jl | 2 +- scripts/Serial/hyperelastic_compliance_ALM.jl | 2 +- .../hyperelastic_compliance_neohook_ALM.jl | 2 +- scripts/Serial/inverse_homenisation_ALM.jl | 2 +- scripts/Serial/inverter_ALM.jl | 2 +- scripts/Serial/inverter_HPM.jl | 2 +- .../nonlinear_thermal_compliance_ALM.jl | 2 +- scripts/Serial/thermal_compliance_ALM.jl | 2 +- .../thermal_compliance_ALM_higherorderlsf.jl | 2 +- src/ChainRules.jl | 104 +++++++++--------- src/{LevelSetTopOpt.jl => GridapTopOpt.jl} | 4 +- src/Optimisers/Optimisers.jl | 14 +-- test/seq/InverseHomogenisationALMTests.jl | 8 +- test/seq/InverterHPMTests.jl | 14 +-- .../seq/NonlinearThermalComplianceALMTests.jl | 12 +- test/seq/PZMultiFieldRepeatingStateTests.jl | 16 +-- test/seq/ThermalComplianceALMTests.jl | 14 +-- 51 files changed, 298 insertions(+), 298 deletions(-) rename src/{LevelSetTopOpt.jl => GridapTopOpt.jl} (94%) diff --git a/.gitignore b/.gitignore index d43b6663..ddd3c8e5 100644 --- a/.gitignore +++ b/.gitignore @@ -26,4 +26,4 @@ docs/site/ # environment. Manifest.toml LocalPreferences.toml -LevelSetTopOpt.so +GridapTopOpt.so diff --git a/Project.toml b/Project.toml index 8f014570..1ad4f421 100755 --- a/Project.toml +++ b/Project.toml @@ -1,4 +1,4 @@ -name = "LevelSetTopOpt" +name = "GridapTopOpt" uuid = "27dd0110-1916-4fd6-8b4b-1bc109db1170" authors = ["Zach Wegert ", "JordiManyer "] version = "0.1.0" diff --git a/README.md b/README.md index f49144ea..d75a3ce5 100755 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ -# LevelSetTopOpt +# GridapTopOpt | **badges** | -LevelSetTopOpt is computational toolbox for level set-based topology optimisation implemented in Julia and the [Gridap](https://github.com/gridap/Gridap.jl) package ecosystem. 
See the documentation and following publication for further details: +GridapTopOpt is computational toolbox for level set-based topology optimisation implemented in Julia and the [Gridap](https://github.com/gridap/Gridap.jl) package ecosystem. See the documentation and following publication for further details: -> Zachary J. Wegert, Jordi Manyer, Connor Mallon, Santiago Badia, and Vivien J. Challis (2024). "LevelSetTopOpt.jl: A scalable Julia toolbox for level set-based topology optimisation". In preparation. +> Zachary J. Wegert, Jordi Manyer, Connor Mallon, Santiago Badia, and Vivien J. Challis (2024). "GridapTopOpt.jl: A scalable Julia toolbox for level set-based topology optimisation". In preparation. ## Documentation @@ -12,4 +12,4 @@ LevelSetTopOpt is computational toolbox for level set-based topology optimisatio - [**LATEST**](...) — *Documentation for the in-development version.* ## Citation -In order to give credit to the `LevelSetTopOpt` contributors, we ask that you please reference the above paper along with the required citations for [Gridap](https://github.com/gridap/Gridap.jl?tab=readme-ov-file#how-to-cite-gridap). \ No newline at end of file +In order to give credit to the `GridapTopOpt` contributors, we ask that you please reference the above paper along with the required citations for [Gridap](https://github.com/gridap/Gridap.jl?tab=readme-ov-file#how-to-cite-gridap). \ No newline at end of file diff --git a/compile/compile.jl b/compile/compile.jl index 60943604..f3fc72d6 100644 --- a/compile/compile.jl +++ b/compile/compile.jl @@ -1,5 +1,5 @@ using PackageCompiler -create_sysimage([:LevelSetTopOpt], - sysimage_path=joinpath(@__DIR__,"..","LevelSetTopOpt.so"), +create_sysimage([:GridapTopOpt], + sysimage_path=joinpath(@__DIR__,"..","GridapTopOpt.so"), precompile_execution_file=joinpath(@__DIR__,"warmup.jl")) diff --git a/compile/warmup.jl b/compile/warmup.jl index c2b6e5e3..feed08f6 100644 --- a/compile/warmup.jl +++ b/compile/warmup.jl @@ -1,4 +1,4 @@ -using Gridap, GridapDistributed, GridapPETSc, PartitionedArrays, LevelSetTopOpt +using Gridap, GridapDistributed, GridapPETSc, PartitionedArrays, GridapTopOpt """ (MPI) Minimum thermal compliance with Lagrangian method in 2D. @@ -83,7 +83,7 @@ function main(mesh_partition,distribute) vel_ext = VelocityExtension(a_hilb,U_reg,V_reg) ## Optimiser - _conv_cond = t->LevelSetTopOpt.conv_cond(t;coef=1/50); + _conv_cond = t->GridapTopOpt.conv_cond(t;coef=1/50); optimiser = AugmentedLagrangian(φ,pcfs,ls_evo,vel_ext,interp,el_size,γ,γ_reinit,conv_criterion=_conv_cond); for history in optimiser it,Ji,_,_ = last(history) diff --git a/docs/make.jl b/docs/make.jl index 6edb2324..7755b4c2 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -3,15 +3,15 @@ # using Pkg; Pkg.activate(".") using Documenter -using LevelSetTopOpt +using GridapTopOpt makedocs( - sitename = "LevelSetTopOpt.jl", + sitename = "GridapTopOpt.jl", format = Documenter.HTML( prettyurls = false, # collapselevel = 1, ), - modules = [LevelSetTopOpt], + modules = [GridapTopOpt], pages = [ "Home" => "index.md", "Getting Started" => "getting-started.md", diff --git a/docs/src/dev/shape_der.md b/docs/src/dev/shape_der.md index 8d4ddb8d..f0477089 100644 --- a/docs/src/dev/shape_der.md +++ b/docs/src/dev/shape_der.md @@ -32,7 +32,7 @@ The final result of course does not yet match ``(\star)``. Over a fixed computat J_1^\prime(\Omega)(-v\boldsymbol{n})=-\int_{D}vf(\boldsymbol{x})H'(\varphi)\lvert\nabla\varphi\rvert~\mathrm{d}s. 
``` -As ``\varphi`` is a signed distance function we have ``\lvert\nabla\varphi\rvert=1`` for ``D\setminus\Sigma`` where ``\Sigma`` is the skeleton of ``\Omega`` and ``\Omega^\complement``. Furthermore, ``H'(\varphi)`` provides support only within a band of ``\partial\Omega``. +As ``\varphi`` is a signed distance function we have ``\lvert\nabla\varphi\rvert=1`` for ``D\setminus\Sigma`` where ``\Sigma`` is the skeleton of ``\Omega`` and ``\Omega^\complement``. Furthermore, ``H'(\varphi)`` provides support only within a band of ``\partial\Omega``. !!! tip "Result I" Therefore, we have that almost everywhere @@ -71,9 +71,9 @@ Consider ``\Omega\subset D`` with ``J(\Omega)=\int_\Omega j(\boldsymbol{u})~\mat In the above ``\boldsymbol{\varepsilon}`` is the strain tensor, ``\boldsymbol{C}`` is the stiffness tensor, ``\Gamma_0 = \partial\Omega\setminus(\Gamma_D\cup\Gamma_N\cup\Gamma_R)``, and ``\Gamma_D``, ``\Gamma_N``, ``\Gamma_R`` are required to be fixed. ### Shape derivative -Let us first consider the shape derivative of ``J``. Disregarding embedding inside the computational domain ``D``, the above strong form can be written in weak form as: +Let us first consider the shape derivative of ``J``. Disregarding embedding inside the computational domain ``D``, the above strong form can be written in weak form as: -``\quad`` *Find* ``\boldsymbol{u}\in H^1_{\Gamma_D}(\Omega)^d`` *such that* +``\quad`` *Find* ``\boldsymbol{u}\in H^1_{\Gamma_D}(\Omega)^d`` *such that* ```math \int_{\Omega} \boldsymbol{C\varepsilon}(\boldsymbol{u})\boldsymbol{\varepsilon}(\boldsymbol{v})~\mathrm{d}\boldsymbol{x}+\int_{\Gamma_R}\boldsymbol{w}(\boldsymbol{u})\cdot\boldsymbol{v}~\mathrm{d}s=\int_\Omega \boldsymbol{f}\cdot\boldsymbol{v}~\mathrm{d}\boldsymbol{x}+\int_{\Gamma_N}\boldsymbol{g}\cdot\boldsymbol{v}~\mathrm{d}s,~\forall \boldsymbol{v}\in H^1_{\Gamma_D}(\Omega)^d. @@ -137,7 +137,7 @@ As required. Note that in the above, we have used that ``\boldsymbol{\theta}\cdo ### Gâteaux derivative in ``\varphi`` -Let us now return to derivatives of ``J`` with respect to ``\varphi`` over the whole computational domain. As previously, suppose that we rewrite ``J`` as +Let us now return to derivatives of ``J`` with respect to ``\varphi`` over the whole computational domain. As previously, suppose that we rewrite ``J`` as ```math \hat{\mathcal{J}}(\varphi)=\int_D (1-H(\varphi))j(\boldsymbol{u})~\mathrm{d}\boldsymbol{x}+\int_{\Gamma_N} l_1(\boldsymbol{u})~\mathrm{d}s+\int_{\Gamma_R} l_2(\boldsymbol{u})~\mathrm{d}s @@ -212,7 +212,7 @@ This exactly as previously up to relaxation over ``D``. Finally, The derivative \end{aligned} ``` -where we have used that ``v=0`` on ``\Gamma_D`` as previously. As previously taking ``\boldsymbol{\theta}=v\boldsymbol{n}`` and relaxing the shape derivative of ``J`` over ``D`` with a signed distance function ``\varphi`` yields: +where we have used that ``v=0`` on ``\Gamma_D`` as previously. As previously taking ``\boldsymbol{\theta}=v\boldsymbol{n}`` and relaxing the shape derivative of ``J`` over ``D`` with a signed distance function ``\varphi`` yields: !!! tip "Result II" ```math @@ -227,7 +227,7 @@ Owing to a fixed computational regime we do not capture a variation of the domai In addition, functionals of the signed distance function posed over the whole bounding domain ``D`` admit a special structure under shape differentiation ([Paper](https://doi.org/10.1051/m2an/2019056)). Such cases are not captured by a Gâteaux derivative at ``\varphi`` under relaxation. !!! 
note - In future, we plan to implement CellFEM via GridapEmbedded in LevelSetTopOpt. This will enable Gâteaux derivative of the mapping + In future, we plan to implement CellFEM via GridapEmbedded in GridapTopOpt. This will enable Gâteaux derivative of the mapping ```math \varphi \mapsto \int_{\Omega(\varphi)}f(\varphi)~\mathrm{d}\boldsymbol{x} + \int_{\Omega(\varphi)^\complement}f(\varphi)~\mathrm{d}\boldsymbol{x}. diff --git a/docs/src/getting-started.md b/docs/src/getting-started.md index 80775cda..841ad21c 100644 --- a/docs/src/getting-started.md +++ b/docs/src/getting-started.md @@ -2,11 +2,11 @@ ## Installation -`LevelSetTopOpt.jl` and additional dependencies can be installed in an existing Julia environment using the package manager. This can be accessed in the Julia REPL (read-eval–print loop) by pressing `]`. We then add the required packages via: +`GridapTopOpt.jl` and additional dependencies can be installed in an existing Julia environment using the package manager. This can be accessed in the Julia REPL (read-eval–print loop) by pressing `]`. We then add the required packages via: ``` -pkg> add LevelSetTopOpt, Gridap, GridapDistributed, GridapPETSc, GridapSolvers, PartitionedArrays, SparseMatricesCSR +pkg> add GridapTopOpt, Gridap, GridapDistributed, GridapPETSc, GridapSolvers, PartitionedArrays, SparseMatricesCSR ``` -Once installed, serial driver scripts can be run immediately, whereas parallel problems also require an MPI installation. +Once installed, serial driver scripts can be run immediately, whereas parallel problems also require an MPI installation. ### MPI For basic users, [`MPI.jl`](https://github.com/JuliaParallel/MPI.jl) provides such an implementation and a Julia wrapper for `mpiexec` - the MPI executor. This is installed via: @@ -19,18 +19,18 @@ Once the `mpiexecjl` wrapper has been added to the system `PATH`, MPI scripts ca ``` mpiexecjl -n P julia main.jl ``` -where `main` is a driver script, `P` denotes the number of processors. +where `main` is a driver script, `P` denotes the number of processors. ### PETSc -In `LevelSetTopOpt.jl` we rely on the [`GridapPETSc.jl`](https://github.com/gridap/GridapPETSc.jl) satellite package to interface with the linear and nonlinear solvers provided by the PETSc (Portable, Extensible Toolkit for Scientific Computation) library. For basic users these solvers are provided by `GridapPETSc.jl` with no additional work. +In `GridapTopOpt.jl` we rely on the [`GridapPETSc.jl`](https://github.com/gridap/GridapPETSc.jl) satellite package to interface with the linear and nonlinear solvers provided by the PETSc (Portable, Extensible Toolkit for Scientific Computation) library. For basic users these solvers are provided by `GridapPETSc.jl` with no additional work. ### Advanced installation For more advanced installations, such as use of a custom MPI/PETSc installation on a HPC cluster, we refer the reader to the [discussion](https://github.com/gridap/GridapPETSc.jl) for `GridapPETSc.jl` and the [configuration page](https://juliaparallel.org/MPI.jl/stable/configuration/) for `MPI.jl`. ## Usage and tutorials -In order to get familiar with the library we recommend following the numerical examples described in: +In order to get familiar with the library we recommend following the numerical examples described in: -> Zachary J. Wegert, Jordi Manyer, Connor Mallon, Santiago Badia, and Vivien J. Challis (2024). "LevelSetTopOpt.jl: A scalable computational toolbox for level set-based topology optimisation". In preparation. +> Zachary J. 
Wegert, Jordi Manyer, Connor Mallon, Santiago Badia, and Vivien J. Challis (2024). "GridapTopOpt.jl: A scalable computational toolbox for level set-based topology optimisation". In preparation. In addition, there are several driver scripts available in `/scripts/..` @@ -38,5 +38,5 @@ More general tutorials for familiarising ones self with Gridap are available via ## Known issues - PETSc's GAMG preconditioner breaks for split Dirichlet DoFs (e.g., x constrained while y free for a single node). There is no simple fix for this. We recommend instead using MUMPS or another preconditioner for this case. -- Currently, our implementation of automatic differentiation does not support multiplication and division of optimisation functionals. We plan to add this in a future release of `LevelSetTopOpt.jl` -- Issue [#38](https://github.com/zjwegert/LSTO_Distributed/issues/38). -- Analytic gradient breaks in parallel for integrals of certain measures -- Issue [#46](https://github.com/zjwegert/LSTO_Distributed/issues/46) \ No newline at end of file +- Currently, our implementation of automatic differentiation does not support multiplication and division of optimisation functionals. We plan to add this in a future release of `GridapTopOpt.jl` -- Issue [#38](https://github.com/zjwegert/GridapTopOpt/issues/38). +- Analytic gradient breaks in parallel for integrals of certain measures -- Issue [#46](https://github.com/zjwegert/GridapTopOpt/issues/46) \ No newline at end of file diff --git a/docs/src/index.md b/docs/src/index.md index e3b8ed44..c8239bcd 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,10 +1,10 @@ -# LevelSetTopOpt.jl -Welcome to the documentation for `LevelSetTopOpt.jl`! +# GridapTopOpt.jl +Welcome to the documentation for `GridapTopOpt.jl`! ## Introduction -`LevelSetTopOpt.jl` is computational toolbox for level set-based topology optimisation implemented in Julia and the Gridap package ecosystem. The core design principle of `LevelSetTopOpt.jl` is to provide an extendable framework for solving optimisation problems in serial or parallel with a high-level programming interface and automatic differentiation. See the following publication for further details: +`GridapTopOpt.jl` is computational toolbox for level set-based topology optimisation implemented in Julia and the Gridap package ecosystem. The core design principle of `GridapTopOpt.jl` is to provide an extendable framework for solving optimisation problems in serial or parallel with a high-level programming interface and automatic differentiation. See the following publication for further details: -> Zachary J. Wegert, Jordi Manyer, Connor Mallon, Santiago Badia, and Vivien J. Challis (2024). "LevelSetTopOpt.jl: A scalable computational toolbox for level set-based topology optimisation". In preparation. +> Zachary J. Wegert, Jordi Manyer, Connor Mallon, Santiago Badia, and Vivien J. Challis (2024). "GridapTopOpt.jl: A scalable computational toolbox for level set-based topology optimisation". In preparation. ## How to use this documentation @@ -16,7 +16,7 @@ Welcome to the documentation for `LevelSetTopOpt.jl`! ## Julia educational resources -A basic knowledge of the Julia programming language is needed to use the `LevelSetTopOpt.jl` package. +A basic knowledge of the Julia programming language is needed to use the `GridapTopOpt.jl` package. Here, one can find a list of resources to get started with this programming language. 
* First steps to learn Julia form the [Gridap wiki](https://github.com/gridap/Gridap.jl/wiki/Start-learning-Julia) page. diff --git a/docs/src/reference/benchmarking.md b/docs/src/reference/benchmarking.md index 617ee276..59594ae3 100644 --- a/docs/src/reference/benchmarking.md +++ b/docs/src/reference/benchmarking.md @@ -1,16 +1,16 @@ # Benchmarking ```@docs -LevelSetTopOpt.benchmark +GridapTopOpt.benchmark ``` ## Existing benchmark methods ```@docs -LevelSetTopOpt.benchmark_optimizer -LevelSetTopOpt.benchmark_single_iteration -LevelSetTopOpt.benchmark_forward_problem -LevelSetTopOpt.benchmark_advection -LevelSetTopOpt.benchmark_reinitialisation -LevelSetTopOpt.benchmark_velocity_extension -LevelSetTopOpt.benchmark_hilbertian_projection_map +GridapTopOpt.benchmark_optimizer +GridapTopOpt.benchmark_single_iteration +GridapTopOpt.benchmark_forward_problem +GridapTopOpt.benchmark_advection +GridapTopOpt.benchmark_reinitialisation +GridapTopOpt.benchmark_velocity_extension +GridapTopOpt.benchmark_hilbertian_projection_map ``` \ No newline at end of file diff --git a/docs/src/reference/chainrules.md b/docs/src/reference/chainrules.md index 36559333..00d6f058 100644 --- a/docs/src/reference/chainrules.md +++ b/docs/src/reference/chainrules.md @@ -3,42 +3,42 @@ ## `PDEConstrainedFunctionals` ```@docs -LevelSetTopOpt.PDEConstrainedFunctionals -LevelSetTopOpt.evaluate! -LevelSetTopOpt.evaluate_functionals! -LevelSetTopOpt.evaluate_derivatives! -LevelSetTopOpt.get_state +GridapTopOpt.PDEConstrainedFunctionals +GridapTopOpt.evaluate! +GridapTopOpt.evaluate_functionals! +GridapTopOpt.evaluate_derivatives! +GridapTopOpt.get_state ``` ## `StateParamIntegrandWithMeasure` ```@docs -LevelSetTopOpt.StateParamIntegrandWithMeasure -LevelSetTopOpt.rrule(u_to_j::LevelSetTopOpt.StateParamIntegrandWithMeasure,uh,φh) +GridapTopOpt.StateParamIntegrandWithMeasure +GridapTopOpt.rrule(u_to_j::GridapTopOpt.StateParamIntegrandWithMeasure,uh,φh) ``` ## Implemented types of `AbstractFEStateMap` ```@docs -LevelSetTopOpt.AbstractFEStateMap +GridapTopOpt.AbstractFEStateMap ``` ### `AffineFEStateMap` ```@docs -LevelSetTopOpt.AffineFEStateMap -LevelSetTopOpt.AffineFEStateMap(a::Function,l::Function,U,V,V_φ,U_reg,φh,dΩ...;assem_U = SparseMatrixAssembler(U,V),assem_adjoint = SparseMatrixAssembler(V,U),assem_deriv = SparseMatrixAssembler(U_reg,U_reg),ls::LinearSolver = LUSolver(),adjoint_ls::LinearSolver = LUSolver()) +GridapTopOpt.AffineFEStateMap +GridapTopOpt.AffineFEStateMap(a::Function,l::Function,U,V,V_φ,U_reg,φh,dΩ...;assem_U = SparseMatrixAssembler(U,V),assem_adjoint = SparseMatrixAssembler(V,U),assem_deriv = SparseMatrixAssembler(U_reg,U_reg),ls::LinearSolver = LUSolver(),adjoint_ls::LinearSolver = LUSolver()) ``` ### `NonlinearFEStateMap` ```@docs -LevelSetTopOpt.NonlinearFEStateMap -LevelSetTopOpt.NonlinearFEStateMap(res::Function,U,V,V_φ,U_reg,φh,dΩ...;assem_U = SparseMatrixAssembler(U,V),assem_adjoint = SparseMatrixAssembler(V,U),assem_deriv = SparseMatrixAssembler(U_reg,U_reg),nls::NonlinearSolver = NewtonSolver(LUSolver();maxiter=50,rtol=1.e-8,verbose=true),adjoint_ls::LinearSolver = LUSolver()) +GridapTopOpt.NonlinearFEStateMap +GridapTopOpt.NonlinearFEStateMap(res::Function,U,V,V_φ,U_reg,φh,dΩ...;assem_U = SparseMatrixAssembler(U,V),assem_adjoint = SparseMatrixAssembler(V,U),assem_deriv = SparseMatrixAssembler(U_reg,U_reg),nls::NonlinearSolver = NewtonSolver(LUSolver();maxiter=50,rtol=1.e-8,verbose=true),adjoint_ls::LinearSolver = LUSolver()) ``` ### `RepeatingAffineFEStateMap` ```@docs 
-LevelSetTopOpt.RepeatingAffineFEStateMap -LevelSetTopOpt.RepeatingAffineFEStateMap(nblocks::Int,a::Function,l::Vector{<:Function},U0,V0,V_φ,U_reg,φh,dΩ...;assem_U = SparseMatrixAssembler(U0,V0),assem_adjoint = SparseMatrixAssembler(V0,U0),assem_deriv = SparseMatrixAssembler(U_reg,U_reg),ls::LinearSolver = LUSolver(),adjoint_ls::LinearSolver = LUSolver()) +GridapTopOpt.RepeatingAffineFEStateMap +GridapTopOpt.RepeatingAffineFEStateMap(nblocks::Int,a::Function,l::Vector{<:Function},U0,V0,V_φ,U_reg,φh,dΩ...;assem_U = SparseMatrixAssembler(U0,V0),assem_adjoint = SparseMatrixAssembler(V0,U0),assem_deriv = SparseMatrixAssembler(U_reg,U_reg),ls::LinearSolver = LUSolver(),adjoint_ls::LinearSolver = LUSolver()) ``` ## Advanced @@ -47,32 +47,32 @@ LevelSetTopOpt.RepeatingAffineFEStateMap(nblocks::Int,a::Function,l::Vector{<:Fu #### Existing methods ```@docs -LevelSetTopOpt.rrule(φ_to_u::LevelSetTopOpt.AbstractFEStateMap,φh) -LevelSetTopOpt.pullback +GridapTopOpt.rrule(φ_to_u::GridapTopOpt.AbstractFEStateMap,φh) +GridapTopOpt.pullback ``` #### Required to implement ```@docs -LevelSetTopOpt.forward_solve! -LevelSetTopOpt.adjoint_solve! -LevelSetTopOpt.update_adjoint_caches! -LevelSetTopOpt.dRdφ -LevelSetTopOpt.get_state(::LevelSetTopOpt.AbstractFEStateMap) -LevelSetTopOpt.get_measure -LevelSetTopOpt.get_spaces -LevelSetTopOpt.get_assemblers -LevelSetTopOpt.get_trial_space -LevelSetTopOpt.get_test_space -LevelSetTopOpt.get_aux_space -LevelSetTopOpt.get_deriv_space -LevelSetTopOpt.get_pde_assembler -LevelSetTopOpt.get_deriv_assembler +GridapTopOpt.forward_solve! +GridapTopOpt.adjoint_solve! +GridapTopOpt.update_adjoint_caches! +GridapTopOpt.dRdφ +GridapTopOpt.get_state(::GridapTopOpt.AbstractFEStateMap) +GridapTopOpt.get_measure +GridapTopOpt.get_spaces +GridapTopOpt.get_assemblers +GridapTopOpt.get_trial_space +GridapTopOpt.get_test_space +GridapTopOpt.get_aux_space +GridapTopOpt.get_deriv_space +GridapTopOpt.get_pde_assembler +GridapTopOpt.get_deriv_assembler ``` ### `IntegrandWithMeasure` ```@docs -LevelSetTopOpt.IntegrandWithMeasure -LevelSetTopOpt.gradient -LevelSetTopOpt.jacobian +GridapTopOpt.IntegrandWithMeasure +GridapTopOpt.gradient +GridapTopOpt.jacobian ``` \ No newline at end of file diff --git a/docs/src/reference/io.md b/docs/src/reference/io.md index e4c4ff89..ce7a6127 100644 --- a/docs/src/reference/io.md +++ b/docs/src/reference/io.md @@ -1,21 +1,21 @@ # IO -In LevelSetTopOpt, the usual IO from [Gridap](https://github.com/gridap/Gridap.jl/) is available. In addition, we also implement the below IO for convenience. +In GridapTopOpt, the usual IO from [Gridap](https://github.com/gridap/Gridap.jl/) is available. In addition, we also implement the below IO for convenience. ## Optimiser history ```@docs -LevelSetTopOpt.write_history +GridapTopOpt.write_history ``` ## Object IO in serial ```@docs -LevelSetTopOpt.save -LevelSetTopOpt.load -LevelSetTopOpt.load! +GridapTopOpt.save +GridapTopOpt.load +GridapTopOpt.load! ``` ## Object IO in parallel ```@docs -LevelSetTopOpt.psave -LevelSetTopOpt.pload -LevelSetTopOpt.pload! +GridapTopOpt.psave +GridapTopOpt.pload +GridapTopOpt.pload! ``` \ No newline at end of file diff --git a/docs/src/reference/levelsetevolution.md b/docs/src/reference/levelsetevolution.md index 3f41a926..0ddcc12f 100644 --- a/docs/src/reference/levelsetevolution.md +++ b/docs/src/reference/levelsetevolution.md @@ -1,39 +1,39 @@ # LevelSetEvolution -In LevelSetTopOpt, the level set is evolved and reinitialised using a `LevelSetEvolution` method. 
The most standard of these is the Hamilton-Jacobi evolution equation solved using a first order upwind finite difference scheme. A forward Euler in time method is provided below via `HamiltonJacobiEvolution <: LevelSetEvolution` along with an upwind finite difference stencil for the spatial discretisation via `FirstOrderStencil`. +In GridapTopOpt, the level set is evolved and reinitialised using a `LevelSetEvolution` method. The most standard of these is the Hamilton-Jacobi evolution equation solved using a first order upwind finite difference scheme. A forward Euler in time method is provided below via `HamiltonJacobiEvolution <: LevelSetEvolution` along with an upwind finite difference stencil for the spatial discretisation via `FirstOrderStencil`. This can be extended in several ways. For example, higher order spatial stencils can be implemented by extending the `Stencil` interface below. In addition, more advanced ODE solvers could be implemented (e.g., Runge–Kutta methods) or entirely different level set evolution methods by extending the `LevelSetEvolution` interface below. ## `HamiltonJacobiEvolution` ```@docs -LevelSetTopOpt.HamiltonJacobiEvolution -LevelSetTopOpt.HamiltonJacobiEvolution(stencil::LevelSetTopOpt.Stencil,model,space,tol=1.e-3,max_steps=100,max_steps_reinit=2000) -LevelSetTopOpt.evolve! -LevelSetTopOpt.reinit! -LevelSetTopOpt.get_dof_Δ(m::HamiltonJacobiEvolution) +GridapTopOpt.HamiltonJacobiEvolution +GridapTopOpt.HamiltonJacobiEvolution(stencil::GridapTopOpt.Stencil,model,space,tol=1.e-3,max_steps=100,max_steps_reinit=2000) +GridapTopOpt.evolve! +GridapTopOpt.reinit! +GridapTopOpt.get_dof_Δ(m::HamiltonJacobiEvolution) ``` ## Spatial stencils for `HamiltonJacobiEvolution` ```@docs -LevelSetTopOpt.FirstOrderStencil +GridapTopOpt.FirstOrderStencil ``` ## Custom `Stencil` ```@docs -LevelSetTopOpt.Stencil -LevelSetTopOpt.evolve!(::LevelSetTopOpt.Stencil,φ,vel,Δt,Δx,isperiodic,caches) -LevelSetTopOpt.reinit!(::LevelSetTopOpt.Stencil,φ_new,φ,vel,Δt,Δx,isperiodic,caches) -LevelSetTopOpt.allocate_caches(::LevelSetTopOpt.Stencil,φ,vel) -LevelSetTopOpt.check_order +GridapTopOpt.Stencil +GridapTopOpt.evolve!(::GridapTopOpt.Stencil,φ,vel,Δt,Δx,isperiodic,caches) +GridapTopOpt.reinit!(::GridapTopOpt.Stencil,φ_new,φ,vel,Δt,Δx,isperiodic,caches) +GridapTopOpt.allocate_caches(::GridapTopOpt.Stencil,φ,vel) +GridapTopOpt.check_order ``` ## Custom `LevelSetEvolution` To implement a custom level set evolution method, we can extend the methods below. For example, one could consider Reaction-Diffusion-based evolution of the level set function. This can be solved with a finite element method and so we can implement a new type that inherits from `LevelSetEvolution` independently of the `Stencil` types. ```@docs -LevelSetTopOpt.LevelSetEvolution -LevelSetTopOpt.evolve!(::LevelSetTopOpt.LevelSetEvolution,φ,args...) -LevelSetTopOpt.reinit!(::LevelSetTopOpt.LevelSetEvolution,φ,args...) -LevelSetTopOpt.get_dof_Δ(::LevelSetTopOpt.LevelSetEvolution) +GridapTopOpt.LevelSetEvolution +GridapTopOpt.evolve!(::GridapTopOpt.LevelSetEvolution,φ,args...) +GridapTopOpt.reinit!(::GridapTopOpt.LevelSetEvolution,φ,args...) 
+GridapTopOpt.get_dof_Δ(::GridapTopOpt.LevelSetEvolution) ``` \ No newline at end of file diff --git a/docs/src/reference/optimisers.md b/docs/src/reference/optimisers.md index fbbe278e..78b8287c 100644 --- a/docs/src/reference/optimisers.md +++ b/docs/src/reference/optimisers.md @@ -1,37 +1,37 @@ # Optimisers -In LevelSetTopOpt we implement optimisation algorithms as [iterators](https://docs.julialang.org/en/v1/manual/interfaces/) that inherit from an abstract type `Optimiser`. A concrete `Optimiser` implementation, say `OptEg`, then implements `iterate(m::OptEg) ↦ (var,state)` and `iterate(m::OptEg,state) ↦ (var,state)`, where `var` and `state` are the available items in the outer loop and internal state of the iterator, respectively. As a result we can iterate over the object `m=OptEg(...)` using `for var in m`. The benefit of this implementation is that the internals of the optimisation method can be hidden in the source code while the explicit `for` loop is still visible to the user. The body of the loop can then be used for auxiliary operations such as writing the optimiser history and other files. +In GridapTopOpt we implement optimisation algorithms as [iterators](https://docs.julialang.org/en/v1/manual/interfaces/) that inherit from an abstract type `Optimiser`. A concrete `Optimiser` implementation, say `OptEg`, then implements `iterate(m::OptEg) ↦ (var,state)` and `iterate(m::OptEg,state) ↦ (var,state)`, where `var` and `state` are the available items in the outer loop and internal state of the iterator, respectively. As a result we can iterate over the object `m=OptEg(...)` using `for var in m`. The benefit of this implementation is that the internals of the optimisation method can be hidden in the source code while the explicit `for` loop is still visible to the user. The body of the loop can then be used for auxiliary operations such as writing the optimiser history and other files. -The below describes the implemented optimisers along with the `OptimiserHistory` type. Custom optimisers can be implemented by creating types that inherit from `Optimiser` and extending the interfaces in [Custom optimiser](@ref). +The below describes the implemented optimisers along with the `OptimiserHistory` type. Custom optimisers can be implemented by creating types that inherit from `Optimiser` and extending the interfaces in [Custom optimiser](@ref). 
-## Lagrangian & Augmented Lagrangian method +## Lagrangian & Augmented Lagrangian method ```@autodocs -Modules = [LevelSetTopOpt] +Modules = [GridapTopOpt] Pages = ["Optimisers/AugmentedLagrangian.jl"] ``` ## Hilbertian projection method ```@autodocs -Modules = [LevelSetTopOpt] +Modules = [GridapTopOpt] Pages = ["Optimisers/HilbertianProjection.jl"] ``` ```@autodocs -Modules = [LevelSetTopOpt] +Modules = [GridapTopOpt] Pages = ["Optimisers/OrthogonalisationMaps.jl"] ``` ## Optimiser history ```@docs -LevelSetTopOpt.OptimiserHistory -LevelSetTopOpt.OptimiserHistorySlice +GridapTopOpt.OptimiserHistory +GridapTopOpt.OptimiserHistorySlice ``` ## Custom `Optimiser` ```@docs -LevelSetTopOpt.Optimiser -LevelSetTopOpt.iterate(::LevelSetTopOpt.Optimiser) -LevelSetTopOpt.iterate(::LevelSetTopOpt.Optimiser,state) -LevelSetTopOpt.get_history(::LevelSetTopOpt.Optimiser) -LevelSetTopOpt.converged(::LevelSetTopOpt.Optimiser) +GridapTopOpt.Optimiser +GridapTopOpt.iterate(::GridapTopOpt.Optimiser) +GridapTopOpt.iterate(::GridapTopOpt.Optimiser,state) +GridapTopOpt.get_history(::GridapTopOpt.Optimiser) +GridapTopOpt.converged(::GridapTopOpt.Optimiser) ``` \ No newline at end of file diff --git a/docs/src/reference/utilities.md b/docs/src/reference/utilities.md index 02bc4ba7..6fc92a5f 100644 --- a/docs/src/reference/utilities.md +++ b/docs/src/reference/utilities.md @@ -2,18 +2,18 @@ ## Ersatz material interpolation ```@docs -LevelSetTopOpt.SmoothErsatzMaterialInterpolation +GridapTopOpt.SmoothErsatzMaterialInterpolation ``` ## Mesh labelling ```@docs -LevelSetTopOpt.update_labels! +GridapTopOpt.update_labels! ``` ## Helpers ```@docs -LevelSetTopOpt.initial_lsf -LevelSetTopOpt.isotropic_elast_tensor -LevelSetTopOpt.get_el_Δ +GridapTopOpt.initial_lsf +GridapTopOpt.isotropic_elast_tensor +GridapTopOpt.get_el_Δ ``` \ No newline at end of file diff --git a/docs/src/reference/velext.md b/docs/src/reference/velext.md index 7e94e680..4179b745 100644 --- a/docs/src/reference/velext.md +++ b/docs/src/reference/velext.md @@ -1,6 +1,6 @@ # Velocity extension ```@autodocs -Modules = [LevelSetTopOpt] +Modules = [GridapTopOpt] Pages = ["VelocityExtension.jl"] ``` \ No newline at end of file diff --git a/docs/src/tutorials/minimum_thermal_compliance.md b/docs/src/tutorials/minimum_thermal_compliance.md index 3d52db3c..966a71f3 100644 --- a/docs/src/tutorials/minimum_thermal_compliance.md +++ b/docs/src/tutorials/minimum_thermal_compliance.md @@ -3,7 +3,7 @@ The goal of this tutorial is to learn - How to formulate a topology optimisation problem - How to describe the problem over a fixed computational domain ``D`` via the level-set method. -- How to setup and solve the problem in LevelSetTopOpt +- How to setup and solve the problem in GridapTopOpt We consider the following extensions at the end of the tutorial: - How to extend problems to 3D and utilise PETSc solvers @@ -63,7 +63,7 @@ For this tutorial, we consider minimising the thermal compliance (or dissipated \right. \end{aligned} ``` -where ``\operatorname{Vol}(\Omega)=\int_\Omega1~\mathrm{d}\boldsymbol{x}``. This objective is equivalent to equivalent to maximising the heat transfer efficiency through ``\Omega``. +where ``\operatorname{Vol}(\Omega)=\int_\Omega1~\mathrm{d}\boldsymbol{x}``. This objective is equivalent to equivalent to maximising the heat transfer efficiency through ``\Omega``. ## Shape differentiation @@ -74,7 +74,7 @@ Suppose that we consider smooth variations of the domain ``\Omega`` of the form !!! 
note "Definition [3]" The shape derivative of ``J(\Omega)`` at ``\Omega`` is defined as the Fréchet derivative in ``W^{1, \infty}(\mathbb{R}^d, \mathbb{R}^d)`` at ``\boldsymbol{\theta}`` of the application ``\boldsymbol{\theta} \rightarrow J(\Omega_{\boldsymbol{\theta}})``, i.e., ```math - J(\Omega_{\boldsymbol{\theta}})(\Omega)=J(\Omega)+J^{\prime}(\Omega)(\boldsymbol{\theta})+\mathrm{o}(\boldsymbol{\theta}) + J(\Omega_{\boldsymbol{\theta}})(\Omega)=J(\Omega)+J^{\prime}(\Omega)(\boldsymbol{\theta})+\mathrm{o}(\boldsymbol{\theta}) ``` with ``\lim _{\boldsymbol{\theta} \rightarrow 0} \frac{\lvert\mathrm{o}(\boldsymbol{\theta})\rvert}{\|\boldsymbol{\theta}\|}=0,`` where the shape derivative ``J^{\prime}(\Omega)`` is a continuous linear form on ``W^{1, \infty}(\mathbb{R}^d, \mathbb{R}^d)`` @@ -97,7 +97,7 @@ and \operatorname{Vol}'(\Omega)(-q\boldsymbol{n}) = -\int_{\Gamma}q~\mathrm{d}s. ``` -## Discretisation via a level set +## Discretisation via a level set Suppose that we attribute a level set function ``\varphi:D\rightarrow\mathbb{R}`` to our domain ``\Omega\subset D`` with ``\bar{\Omega}=\lbrace \boldsymbol{x}:\varphi(\boldsymbol{x})\leq0\rbrace`` and ``\Omega^\complement=\lbrace \boldsymbol{x}:\varphi(\boldsymbol{x})>0\rbrace``. We can then define a smooth characteristic function ``I:\mathbb{R}\rightarrow[\epsilon,1]`` as ``I(\varphi)=(1-H(\varphi))+\epsilon H(\varphi)`` where ``H`` is a smoothed Heaviside function with smoothing radius ``\eta``, and ``\epsilon\ll1`` allows for an ersatz material approximation. Of course, ``\epsilon`` can be taken as zero depending on the computational regime. Over the fixed computational domain we may relax integrals to be over all of ``D`` via ``\mathrm{d}\boldsymbol{x}= H(\varphi)~\mathrm{d}\boldsymbol{x}`` and ``\mathrm{d}s = H'(\varphi)\lvert\nabla\varphi\rvert~\mathrm{d}\boldsymbol{x}``. The above optimisation problem then rewrites in terms of ``\varphi`` as @@ -122,14 +122,14 @@ C(\varphi)&=\int_D (\rho(\varphi) - V_f)/\operatorname{Vol}(D)~\mathrm{d}\boldsy &=\int_D \rho(\varphi)~\mathrm{d}\boldsymbol{x}/\operatorname{Vol}(D) - V_f\\ &=\int_\Omega~\mathrm{d}\boldsymbol{x}/\operatorname{Vol}(D)-V_f = \operatorname{Vol}(\Omega)/\operatorname{Vol}(D)-V_f \end{aligned} -``` +``` where ``\rho(\varphi)=1-H(\varphi)`` is the smoothed volume density function. !!! note - In LevelSetTopOpt we assume constraints are of the integral form above. + In GridapTopOpt we assume constraints are of the integral form above. -The shape derivatives from the previous section can be relaxed over the computational domain as +The shape derivatives from the previous section can be relaxed over the computational domain as ```math J'(\varphi)(-q\boldsymbol{n}) = \int_{D}q\kappa\boldsymbol{\nabla}(u)\cdot\boldsymbol{\nabla}(u)H'(\varphi)\lvert\nabla\varphi\rvert~\mathrm{d}\boldsymbol{x} @@ -141,16 +141,16 @@ C'(\varphi)(-q\boldsymbol{n}) = -\int_{D}qH'(\varphi)\lvert\nabla\varphi\rvert~\ ## Computational method -In the following, we discuss the implementation of the above optimisation problem in LevelSetTopOpt. For the purpose of this tutorial we break the computational formulation into chunks. +In the following, we discuss the implementation of the above optimisation problem in GridapTopOpt. For the purpose of this tutorial we break the computational formulation into chunks. 
The first step in creating our script is to load any packages required: ```julia -using LevelSetTopOpt, Gridap +using GridapTopOpt, Gridap ``` ### Parameters -The following are user defined parameters for the problem. -These parameters will be discussed over the course of this tutorial. +The following are user defined parameters for the problem. +These parameters will be discussed over the course of this tutorial. ```julia # FE parameters @@ -199,7 +199,7 @@ dΓ_N = Measure(Γ_N,2*order) ``` where `2*order` indicates the quadrature degree for numerical integration. -The final stage of the finite element setup is the approximation of the finite element spaces. This is given as follows: +The final stage of the finite element setup is the approximation of the finite element spaces. This is given as follows: ```julia # Spaces reffe = ReferenceFE(lagrangian,Float64,order) @@ -223,7 +223,7 @@ For this problem we set `lsf_func` using the function [`initial_lsf`](@ref) in t ```math \varphi_{\xi,a}(\boldsymbol{x})=-\frac{1}{4} \prod_i^D(\cos(\xi\pi x_i)) - a/4 ``` -with ``\xi,a=(4,0.2)`` and ``D=2`` in two dimensions. +with ``\xi,a=(4,0.2)`` and ``D=2`` in two dimensions. We also generate a smooth characteristic function of radius ``\eta`` using: @@ -238,7 +238,7 @@ This the [`SmoothErsatzMaterialInterpolation`](@ref) structure defines the chara |:--:| |Figure 2: A visualisation of the initial level set function and the interpolated density function ``\rho`` for ``\Omega``.| -Optional: we can generate a VTK file for visualisation in Paraview via +Optional: we can generate a VTK file for visualisation in Paraview via ```julia writevtk(Ω,"initial_lsf",cellfields=["phi"=>φh, "ρ(phi)"=>(ρ ∘ φh),"|nabla(phi)|"=>(norm ∘ ∇(φh))]) @@ -292,17 +292,17 @@ In this case, the analytic shape derivatives are passed as optional arguments. W ### Velocity extension-regularisation method -The Hilbertian extension-regularisation [4] method involves solving an -identification problem over a Hilbert space ``H`` on ``D`` with -inner product ``\langle\cdot,\cdot\rangle_H``: +The Hilbertian extension-regularisation [4] method involves solving an +identification problem over a Hilbert space ``H`` on ``D`` with +inner product ``\langle\cdot,\cdot\rangle_H``: *Find* ``g_\Omega\in H`` *such that* ``\langle g_\Omega,q\rangle_H =-J^{\prime}(\Omega)(q\boldsymbol{n})~ \forall q\in H.`` -This provides two benefits: - 1) It naturally extends the shape sensitivity from ``\partial\Omega`` +This provides two benefits: + 1) It naturally extends the shape sensitivity from ``\partial\Omega`` onto the bounding domain ``D``; and - 2) ensures a descent direction for ``J(\Omega)`` with additional regularity + 2) ensures a descent direction for ``J(\Omega)`` with additional regularity (i.e., ``H`` as opposed to ``L^2(\partial\Omega)``). For our problem above we take the inner product @@ -317,7 +317,7 @@ where ``\alpha`` is the smoothing length scale. Equivalently in our script we ha a_hilb(p,q) =∫(α^2*∇(p)⋅∇(q) + p*q)dΩ ``` -We then build an object [`VelocityExtension`](@ref). This object provides a method [`project!`](@ref) that applies the Hilbertian velocity-extension method to a given shape derivative. +We then build an object [`VelocityExtension`](@ref). This object provides a method [`project!`](@ref) that applies the Hilbertian velocity-extension method to a given shape derivative. ```julia vel_ext = VelocityExtension(a_hilb,U_reg,V_reg) @@ -360,7 +360,7 @@ We may now create the optimiser object. 
This structure holds all information reg optimiser = AugmentedLagrangian(pcfs,ls_evo,vel_ext,φh;γ,γ_reinit,verbose=true,constraint_names=[:Vol]) ``` -As optimisers inheriting from [`LevelSetTopOpt.Optimiser`](@ref) implement Julia's iterator functionality, we can solve the optimisation problem to convergence by iterating over the optimiser: +As optimisers inheriting from [`GridapTopOpt.Optimiser`](@ref) implement Julia's iterator functionality, we can solve the optimisation problem to convergence by iterating over the optimiser: ```julia # Solve @@ -379,7 +379,7 @@ end ``` !!! warning - Due to a possible memory leak in Julia 1.9.* IO, we include a call to the garbage collector using `GC.gc()`. + Due to a possible memory leak in Julia 1.9.* IO, we include a call to the garbage collector using `GC.gc()`. Depending on whether we use `iszero(it % iter_mod)`, the VTK file for the final structure may need to be saved using @@ -398,7 +398,7 @@ writevtk(Ω,path*"_$it",cellfields=["phi"=>φh, ``` ```julia -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt # FE parameters order = 1 # Finite element order @@ -504,7 +504,7 @@ The first and most straightforward in terms of programmatic changes is extending |:--:| |Figure 4: The setup for the three-dimensional minimum thermal compliance problem.| -We use a unit cube for the bounding domain ``D`` with ``50^3`` elements. This corresponds to changing lines 5-7 in the above script to +We use a unit cube for the bounding domain ``D`` with ``50^3`` elements. This corresponds to changing lines 5-7 in the above script to ```julia xmax=ymax=zmax=1.0 # Domain size @@ -539,7 +539,7 @@ tol = 1/(2order^2)/minimum(el_size) # Advection tolerance ``` ```julia -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt # FE parameters order = 1 # Finite element order @@ -623,10 +623,10 @@ writevtk(Ω,path*"struc_$it",cellfields=["phi"=>φh, ``` -At this stage the problem will not be possible to run as we're using a standard LU solver. For this reason we now consider adjusting Script 2 to use an iterative solver provided by PETSc. We rely on the GridapPETSc satellite package to utilise PETSc. This provides the necessary structures to efficiently interface with the linear and nonlinear solvers provided by the PETSc library. To call GridapPETSc we change line 1 of Script 2 to +At this stage the problem will not be possible to run as we're using a standard LU solver. For this reason we now consider adjusting Script 2 to use an iterative solver provided by PETSc. We rely on the GridapPETSc satellite package to utilise PETSc. This provides the necessary structures to efficiently interface with the linear and nonlinear solvers provided by the PETSc library. To call GridapPETSc we change line 1 of Script 2 to ```julia -using Gridap, GridapPETSc, SparseMatricesCSR, LevelSetTopOpt +using Gridap, GridapPETSc, SparseMatricesCSR, GridapTopOpt ``` We also use `SparseMatricesCSR` as PETSc is based on the `SparseMatrixCSR` datatype. We then replace line 52 and 63 with @@ -655,13 +655,13 @@ respectively. Here we specify that the `SparseMatrixAssembler` should be based o Finally, we wrap the entire script in a function and call it inside a `GridapPETSc.with` block. This ensures that PETSc is safely initialised. This should take the form ```Julia -using Gridap, GridapPETSc, SparseMatricesCSR, LevelSetTopOpt +using Gridap, GridapPETSc, SparseMatricesCSR, GridapTopOpt function main() ... 
end -solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true +solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true -ksp_converged_reason -ksp_rtol 1.0e-12" GridapPETSc.with(args=split(solver_options)) do main() @@ -675,7 +675,7 @@ We utilise a conjugate gradient method with geometric algebraic multigrid precon ``` ```julia -using Gridap, GridapPETSc, SparseMatricesCSR, LevelSetTopOpt +using Gridap, GridapPETSc, SparseMatricesCSR, GridapTopOpt function main() # FE parameters @@ -769,7 +769,7 @@ function main() "H(phi)"=>(H ∘ φh),"|nabla(phi)|"=>(norm ∘ ∇(φh)),"uh"=>uh]) end -solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true +solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true -ksp_converged_reason -ksp_rtol 1.0e-12" GridapPETSc.with(args=split(solver_options)) do main() @@ -788,10 +788,10 @@ We can run this script and visualise the initial and final structures using Para ### Serial to MPI -Script 3 contains no parallelism to enable further speedup or scalability. To enable MPI-based computing we rely on the tools implemented in PartitionedArrays and GridapDistributed. Further information regarding these packages and how they interface with LevelSetTopOpt can be found [here](../usage/mpi-mode.md). To add these packages we adjust the first line of our script: +Script 3 contains no parallelism to enable further speedup or scalability. To enable MPI-based computing we rely on the tools implemented in PartitionedArrays and GridapDistributed. Further information regarding these packages and how they interface with GridapTopOpt can be found [here](../usage/mpi-mode.md). To add these packages we adjust the first line of our script: ```julia -using Gridap, GridapPETSc, GridapDistributed, PartitionedArrays, SparseMatricesCSR, LevelSetTopOpt +using Gridap, GridapPETSc, GridapDistributed, PartitionedArrays, SparseMatricesCSR, GridapTopOpt ``` Before we change any parts of the function `main`, we adjust the end of the script to safely launch MPI inside a Julia `do` block. We replace lines 95-99 in Script 3 with @@ -799,7 +799,7 @@ Before we change any parts of the function `main`, we adjust the end of the scri ```julia with_mpi() do distribute mesh_partition = (2,2,2) - solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true + solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true -ksp_converged_reason -ksp_rtol 1.0e-12" GridapPETSc.with(args=split(solver_options)) do main(mesh_partition,distribute) @@ -838,7 +838,7 @@ That's it! 
These are the only changes that are necessary to run your application ``` ```julia -using Gridap, GridapPETSc, GridapDistributed, PartitionedArrays, SparseMatricesCSR, LevelSetTopOpt +using Gridap, GridapPETSc, GridapDistributed, PartitionedArrays, SparseMatricesCSR, GridapTopOpt function main(mesh_partition,distribute) ranks = distribute(LinearIndices((prod(mesh_partition),))) @@ -936,7 +936,7 @@ end with_mpi() do distribute mesh_partition = (2,2,2) - solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true + solver_options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true -ksp_converged_reason -ksp_rtol 1.0e-12" GridapPETSc.with(args=split(solver_options)) do main(mesh_partition,distribute) @@ -980,7 +980,7 @@ As we're considering a 2D problem, we consider modifications of Script 1 as foll κ(u) = exp(-u) # Diffusivity ``` -We then replace the `a` and `l` on line 48 and 49 by the residual `R` given by +We then replace the `a` and `l` on line 48 and 49 by the residual `R` given by ```julia R(u,v,φ,dΩ,dΓ_N) = ∫((I ∘ φ)*(κ ∘ u)*∇(u)⋅∇(v))dΩ - ∫(g*v)dΓ_N ``` @@ -991,7 +991,7 @@ state_map = NonlinearFEStateMap(R,U,V,V_φ,U_reg,φh,dΩ,dΓ_N) ``` This by default implements a standard `NewtonSolver` from GridapSolvers while utilising an LU solver for intermediate linear solves involving the Jacobian. As with other `FEStateMap` types, this constructor can optionally take assemblers and different solvers (e.g., PETSc solvers). -Next, we replace the objective functional on line 52 with +Next, we replace the objective functional on line 52 with ```julia J(u,φ,dΩ,dΓ_N) = ∫((I ∘ φ)*(κ ∘ u)*∇(u)⋅∇(u))dΩ @@ -1009,7 +1009,7 @@ Notice that the argument `analytic_dJ=...` has been removed, this enables the AD ``` ```julia -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt # FE parameters order = 1 # Finite element order @@ -1103,11 +1103,11 @@ A 3D example of a nonlinear thermal conductivity problem can be found under `scr ## References > 1. *Z. Guo, X. Cheng, and Z. Xia. Least dissipation principle of heat transport potential capacity and its application in heat conduction optimization. Chinese Science Bulletin, 48(4):406–410, Feb 2003. ISSN 1861-9541. doi: 10.1007/BF03183239.* -> +> > 2. *C. Zhuang, Z. Xiong, and H. Ding. A level set method for topology optimization of heat conduction problem under multiple load cases. Computer Methods in Applied Mechanics and Engineering, 196(4–6):1074–1084, Jan 2007. ISSN 00457825. doi: 10.1016/j.cma.2006.08.005.* > > 3. *Allaire G, Jouve F, Toader AM (2004) Structural optimization using sensitivity analysis and a level-set method. Journal of Computational Physics 194(1):363–393. doi: 10.1016/j.jcp.2003.09.032* -> +> > 4. *Allaire G, Dapogny C, Jouve F (2021) Shape and topology optimization, vol 22, Elsevier, p 1–132. doi: 10.1016/bs.hna.2020.10.004* > > 5. *Osher S, Fedkiw R (2006) Level Set Methods and Dynamic Implicit Surfaces, 1st edn. Applied Mathematical Sciences, Springer Science & Business Media. 
doi: 10.1007/b98879* diff --git a/scripts/Benchmarks/benchmark.jl b/scripts/Benchmarks/benchmark.jl index c3dc701c..c31d395c 100644 --- a/scripts/Benchmarks/benchmark.jl +++ b/scripts/Benchmarks/benchmark.jl @@ -1,7 +1,7 @@ -using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR +using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, + PartitionedArrays, GridapTopOpt, SparseMatricesCSR -using LevelSetTopOpt: get_deriv_space, get_aux_space,benchmark_optimizer, +using GridapTopOpt: get_deriv_space, get_aux_space,benchmark_optimizer, benchmark_forward_problem,benchmark_advection,benchmark_reinitialisation, benchmark_velocity_extension,benchmark_hilbertian_projection_map,benchmark_single_iteration @@ -75,7 +75,7 @@ function nl_elast(mesh_partition,ranks,el_size,order,verbose) F(∇u) = one(∇u) + ∇u' ## Volume change J(F) = sqrt(det(C(F))) - ## Residual + ## Residual res(u,v,φ,dΩ,dΓ_N) = ∫( (I ∘ φ)*((dE ∘ (∇(v),∇(u))) ⊙ (S ∘ ∇(u))) )*dΩ - ∫(g⋅v)dΓ_N Tm = SparseMatrixCSR{0,PetscScalar,PetscInt} Tv = Vector{PetscScalar} @@ -281,11 +281,11 @@ function inverter_HPM(mesh_partition,ranks,el_size,order,verbose) ## FE Setup model = CartesianDiscreteModel(ranks,mesh_partition,dom,el_size); el_Δ = get_el_Δ(model) - f_Γ_in(x) = (x[1] ≈ 0.0) && (0.4 - eps() <= x[2] <= 0.6 + eps()) && + f_Γ_in(x) = (x[1] ≈ 0.0) && (0.4 - eps() <= x[2] <= 0.6 + eps()) && (0.4 - eps() <= x[3] <= 0.6 + eps()) - f_Γ_out(x) = (x[1] ≈ 1.0) && (0.4 - eps() <= x[2] <= 0.6 + eps()) && + f_Γ_out(x) = (x[1] ≈ 1.0) && (0.4 - eps() <= x[2] <= 0.6 + eps()) && (0.4 - eps() <= x[3] <= 0.6 + eps()) - f_Γ_out_ext(x) = ~f_Γ_out(x) && (0.9 <= x[1] <= 1.0) && (0.3 - eps() <= x[2] <= 0.7 + eps()) && + f_Γ_out_ext(x) = ~f_Γ_out(x) && (0.9 <= x[1] <= 1.0) && (0.3 - eps() <= x[2] <= 0.7 + eps()) && (0.3 - eps() <= x[3] <= 0.7 + eps()) f_Γ_D(x) = (x[1] ≈ 0.0) && (x[2] <= 0.1 || x[2] >= 0.9) && (x[3] <= 0.1 || x[3] >= 0.9) update_labels!(1,model,f_Γ_in,"Gamma_in") @@ -337,7 +337,7 @@ function inverter_HPM(mesh_partition,ranks,el_size,order,verbose) Tm = SparseMatrixCSR{0,PetscScalar,PetscInt} Tv = Vector{PetscScalar} solver = ElasticitySolver(V) - + state_map = AffineFEStateMap( a,l,U,V,V_φ,U_reg,φh,dΩ,dΓ_in,dΓ_out; assem_U = SparseMatrixAssembler(Tm,Tv,U,V), @@ -355,7 +355,7 @@ function inverter_HPM(mesh_partition,ranks,el_size,order,verbose) assem = SparseMatrixAssembler(Tm,Tv,U_reg,V_reg), ls = PETScLinearSolver() ) - + ## Optimiser return HilbertianProjection(pcfs,ls_evo,vel_ext,φh;γ,γ_reinit,verbose=verbose) end @@ -365,21 +365,21 @@ with_mpi() do distribute mesh_partition = (Nx,Ny,Nz) ranks = distribute(LinearIndices((prod(mesh_partition),))) el_size = (N_EL,N_EL,N_EL) - verbose = Bool(VERBOSE) ? i_am_main(ranks) : false; + verbose = Bool(VERBOSE) ? 
i_am_main(ranks) : false; if PROB_TYPE == "NLELAST" - options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true + options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true -ksp_converged_reason -ksp_rtol 1.0e-12" opt = nl_elast - elseif PROB_TYPE == "THERM" - options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true + elseif PROB_TYPE == "THERM" + options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true -ksp_converged_reason -ksp_rtol 1.0e-12" opt = therm elseif PROB_TYPE == "ELAST" - options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true + options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true -ksp_converged_reason -ksp_rtol 1.0e-12" opt = elast elseif PROB_TYPE == "INVERTER_HPM" - options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true + options = "-pc_type gamg -ksp_type cg -ksp_error_if_not_converged true -ksp_converged_reason -ksp_rtol 1.0e-12" opt = inverter_HPM else @@ -439,7 +439,7 @@ with_mpi() do distribute end ## HPM if occursin("bhpm",BMARK_TYPE) - @assert typeof(optim) <: HilbertianProjection + @assert typeof(optim) <: HilbertianProjection J, C, dJ, dC = Gridap.evaluate!(optim.problem,optim.φ0) optim.projector,dJ,C,dC,optim.vel_ext.K bhpm = benchmark_hilbertian_projection_map(optim.projector,dJ,C,dC,optim.vel_ext.K,ranks) diff --git a/scripts/Benchmarks/generate_benchmark_scripts.jl b/scripts/Benchmarks/generate_benchmark_scripts.jl index f142b86b..071177a4 100644 --- a/scripts/Benchmarks/generate_benchmark_scripts.jl +++ b/scripts/Benchmarks/generate_benchmark_scripts.jl @@ -34,11 +34,11 @@ function generate( ) ncpus = prod(mesh_partition) - wallhr = occursin("STRONG",name) && ncpus == 1 ? 100 : - occursin("STRONG",name) && ncpus == 8 ? 24 : + wallhr = occursin("STRONG",name) && ncpus == 1 ? 100 : + occursin("STRONG",name) && ncpus == 8 ? 24 : occursin("STRONG",name) && ncpus == 27 ? 10 : wallhr - mem = occursin("STRONG",name) && ncpus == 1 ? 100 : - occursin("STRONG",name) && ncpus == 8 ? 128 : + mem = occursin("STRONG",name) && ncpus == 1 ? 100 : + occursin("STRONG",name) && ncpus == 8 ? 128 : occursin("STRONG",name) && ncpus == 27 ? 
192 : Int(gb_per_node*ncpus/cps_per_node); Nx_partition, Ny_partition, Nz_partition = mesh_partition @@ -68,7 +68,7 @@ function generate_jobs(template,phys_type,ndof_per_node,bmark_types) strong_jobs,weak_jobs,dof_sanity_check end -dir_name= "LevelSetTopOpt"; +dir_name= "GridapTopOpt"; job_output_path = "$(ENV["SCRATCH"])/$dir_name/scripts/Benchmarks/jobs-gadi/"; mkpath(job_output_path); @@ -103,7 +103,7 @@ jobs_by_phys = map(x->(x[1],generate_jobs(template,x[1],x[2],x[3])),phys_types); for jobs in jobs_by_phys strong_jobs,weak_jobs,dof_sanity_check = jobs[2] - + println("$(jobs[1]): Weak dofs = $(dof_sanity_check[1])\n Error = $(dof_sanity_check[2])\n") for job in vcat(strong_jobs,weak_jobs) name = job[1] diff --git a/scripts/MPI/3d_elastic_compliance_ALM.jl b/scripts/MPI/3d_elastic_compliance_ALM.jl index cb221837..a5ff5c02 100644 --- a/scripts/MPI/3d_elastic_compliance_ALM.jl +++ b/scripts/MPI/3d_elastic_compliance_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global elx = parse(Int,ARGS[1]) global ely = parse(Int,ARGS[2]) diff --git a/scripts/MPI/3d_hyperelastic_compliance_ALM.jl b/scripts/MPI/3d_hyperelastic_compliance_ALM.jl index fe6d8f61..35836c01 100644 --- a/scripts/MPI/3d_hyperelastic_compliance_ALM.jl +++ b/scripts/MPI/3d_hyperelastic_compliance_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR using GridapSolvers: NewtonSolver diff --git a/scripts/MPI/3d_hyperelastic_compliance_neohook_ALM.jl b/scripts/MPI/3d_hyperelastic_compliance_neohook_ALM.jl index 7d781947..a8c8b110 100644 --- a/scripts/MPI/3d_hyperelastic_compliance_neohook_ALM.jl +++ b/scripts/MPI/3d_hyperelastic_compliance_neohook_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR using GridapSolvers: NewtonSolver diff --git a/scripts/MPI/3d_inverse_homenisation_ALM.jl b/scripts/MPI/3d_inverse_homenisation_ALM.jl index 087372b4..359a1cf2 100644 --- a/scripts/MPI/3d_inverse_homenisation_ALM.jl +++ b/scripts/MPI/3d_inverse_homenisation_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global elx = parse(Int,ARGS[1]) global ely = parse(Int,ARGS[2]) diff --git a/scripts/MPI/3d_inverter_ALM.jl b/scripts/MPI/3d_inverter_ALM.jl index aa7fb6fd..93b4a36b 100644 --- a/scripts/MPI/3d_inverter_ALM.jl +++ b/scripts/MPI/3d_inverter_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global elx = parse(Int,ARGS[1]) global ely = parse(Int,ARGS[2]) diff --git a/scripts/MPI/3d_inverter_HPM.jl b/scripts/MPI/3d_inverter_HPM.jl index 681450d0..ccaf8dcd 100644 --- a/scripts/MPI/3d_inverter_HPM.jl +++ b/scripts/MPI/3d_inverter_HPM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global elx = 
parse(Int,ARGS[1]) global ely = parse(Int,ARGS[2]) diff --git a/scripts/MPI/3d_nonlinear_thermal_compliance_ALM.jl b/scripts/MPI/3d_nonlinear_thermal_compliance_ALM.jl index 08bcb6f2..dd6a0503 100644 --- a/scripts/MPI/3d_nonlinear_thermal_compliance_ALM.jl +++ b/scripts/MPI/3d_nonlinear_thermal_compliance_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR using GridapSolvers: NewtonSolver diff --git a/scripts/MPI/3d_thermal_compliance_ALM.jl b/scripts/MPI/3d_thermal_compliance_ALM.jl index 4bf81ede..2b238351 100644 --- a/scripts/MPI/3d_thermal_compliance_ALM.jl +++ b/scripts/MPI/3d_thermal_compliance_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global elx = parse(Int,ARGS[1]) global ely = parse(Int,ARGS[2]) diff --git a/scripts/MPI/elastic_compliance_ALM.jl b/scripts/MPI/elastic_compliance_ALM.jl index 2c434df7..2cdafccf 100644 --- a/scripts/MPI/elastic_compliance_ALM.jl +++ b/scripts/MPI/elastic_compliance_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global write_dir = ARGS[1] diff --git a/scripts/MPI/inverse_homenisation_ALM.jl b/scripts/MPI/inverse_homenisation_ALM.jl index 032e93c8..5946f589 100644 --- a/scripts/MPI/inverse_homenisation_ALM.jl +++ b/scripts/MPI/inverse_homenisation_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global write_dir = ARGS[1] diff --git a/scripts/MPI/nonlinear_thermal_compliance_ALM.jl b/scripts/MPI/nonlinear_thermal_compliance_ALM.jl index ceb760da..beccd0e9 100644 --- a/scripts/MPI/nonlinear_thermal_compliance_ALM.jl +++ b/scripts/MPI/nonlinear_thermal_compliance_ALM.jl @@ -1,5 +1,5 @@ using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global write_dir = ARGS[1] diff --git a/scripts/MPI/thermal_compliance_ALM.jl b/scripts/MPI/thermal_compliance_ALM.jl index a90225d5..9c78fbb1 100644 --- a/scripts/MPI/thermal_compliance_ALM.jl +++ b/scripts/MPI/thermal_compliance_ALM.jl @@ -1,4 +1,4 @@ -using Gridap, GridapDistributed, GridapPETSc, PartitionedArrays, LevelSetTopOpt +using Gridap, GridapDistributed, GridapPETSc, PartitionedArrays, GridapTopOpt global write_dir = ARGS[1] diff --git a/scripts/MPI/thermal_compliance_ALM_PETSc.jl b/scripts/MPI/thermal_compliance_ALM_PETSc.jl index 0344f063..9248276f 100644 --- a/scripts/MPI/thermal_compliance_ALM_PETSc.jl +++ b/scripts/MPI/thermal_compliance_ALM_PETSc.jl @@ -1,5 +1,5 @@ using Gridap, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR + PartitionedArrays, GridapTopOpt, SparseMatricesCSR global write_dir = ARGS[1] diff --git a/scripts/Serial/elastic_compliance_ALM.jl b/scripts/Serial/elastic_compliance_ALM.jl index 25c7aeb7..9f0b7024 100644 --- a/scripts/Serial/elastic_compliance_ALM.jl +++ b/scripts/Serial/elastic_compliance_ALM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using 
Gridap, GridapTopOpt """ (Serial) Minimum elastic compliance with augmented Lagrangian method in 2D. diff --git a/scripts/Serial/elastic_compliance_HPM.jl b/scripts/Serial/elastic_compliance_HPM.jl index 14e734fe..9f787c30 100644 --- a/scripts/Serial/elastic_compliance_HPM.jl +++ b/scripts/Serial/elastic_compliance_HPM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum elastic compliance with Hilbertian projection method in 2D. diff --git a/scripts/Serial/elastic_compliance_LM.jl b/scripts/Serial/elastic_compliance_LM.jl index 135cccd8..b09e501c 100644 --- a/scripts/Serial/elastic_compliance_LM.jl +++ b/scripts/Serial/elastic_compliance_LM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum elastic compliance with Lagrangian method in 2D. diff --git a/scripts/Serial/hyperelastic_compliance_ALM.jl b/scripts/Serial/hyperelastic_compliance_ALM.jl index e3f15cfd..dce96bd5 100644 --- a/scripts/Serial/hyperelastic_compliance_ALM.jl +++ b/scripts/Serial/hyperelastic_compliance_ALM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum hyperelastic compliance with augmented Lagrangian method in 2D. diff --git a/scripts/Serial/hyperelastic_compliance_neohook_ALM.jl b/scripts/Serial/hyperelastic_compliance_neohook_ALM.jl index d6ae9163..eca9d1ce 100644 --- a/scripts/Serial/hyperelastic_compliance_neohook_ALM.jl +++ b/scripts/Serial/hyperelastic_compliance_neohook_ALM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum hyperelastic (neohookean) compliance with augmented Lagrangian method in 2D. diff --git a/scripts/Serial/inverse_homenisation_ALM.jl b/scripts/Serial/inverse_homenisation_ALM.jl index d06b2d47..972d4328 100644 --- a/scripts/Serial/inverse_homenisation_ALM.jl +++ b/scripts/Serial/inverse_homenisation_ALM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Maximum bulk modulus inverse homogenisation with augmented Lagrangian method in 2D. diff --git a/scripts/Serial/inverter_ALM.jl b/scripts/Serial/inverter_ALM.jl index f9a8e3be..ad2d8786 100644 --- a/scripts/Serial/inverter_ALM.jl +++ b/scripts/Serial/inverter_ALM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Inverter mechanism with Hilbertian projection method in 2D. diff --git a/scripts/Serial/inverter_HPM.jl b/scripts/Serial/inverter_HPM.jl index 2274a2f4..fbe13122 100644 --- a/scripts/Serial/inverter_HPM.jl +++ b/scripts/Serial/inverter_HPM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Inverter mechanism with Hilbertian projection method in 2D. diff --git a/scripts/Serial/nonlinear_thermal_compliance_ALM.jl b/scripts/Serial/nonlinear_thermal_compliance_ALM.jl index 16ffa9b4..2bf32486 100644 --- a/scripts/Serial/nonlinear_thermal_compliance_ALM.jl +++ b/scripts/Serial/nonlinear_thermal_compliance_ALM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum thermal compliance with augmented Lagrangian method in 2D with nonlinear diffusivity. 
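
The user-facing effect of the rename in each of the serial driver scripts above and below is the same one-line import change; a minimal sketch of a migrated script header (all remaining problem setup in these scripts is untouched):

    # before the rename
    using Gridap, LevelSetTopOpt
    # after the rename
    using Gridap, GridapTopOpt
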
diff --git a/scripts/Serial/thermal_compliance_ALM.jl b/scripts/Serial/thermal_compliance_ALM.jl index 1201db0d..aaf45b24 100644 --- a/scripts/Serial/thermal_compliance_ALM.jl +++ b/scripts/Serial/thermal_compliance_ALM.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum thermal compliance with augmented Lagrangian method in 2D. diff --git a/scripts/Serial/thermal_compliance_ALM_higherorderlsf.jl b/scripts/Serial/thermal_compliance_ALM_higherorderlsf.jl index 3db4ea46..51f8e1a1 100644 --- a/scripts/Serial/thermal_compliance_ALM_higherorderlsf.jl +++ b/scripts/Serial/thermal_compliance_ALM_higherorderlsf.jl @@ -1,4 +1,4 @@ -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum thermal compliance with augmented Lagrangian method in 2D. diff --git a/src/ChainRules.jl b/src/ChainRules.jl index 501c4f7b..361f8daa 100644 --- a/src/ChainRules.jl +++ b/src/ChainRules.jl @@ -1,8 +1,8 @@ """ struct IntegrandWithMeasure{A,B<:Tuple} -A wrapper to enable serial or parallel partial differentation of an -integral `F` using `Gridap.gradient`. This is required to allow automatic +A wrapper to enable serial or parallel partial differentation of an +integral `F` using `Gridap.gradient`. This is required to allow automatic differentation with `DistributedMeasure`. # Properties @@ -30,13 +30,13 @@ evaluate the partial derivative of `F.F` with respect to `uh[K]`. # Example Suppose `uh` and `φh` are FEFunctions with measures `dΩ` and `dΓ_N`. -Then the partial derivative of a function `J` wrt to `φh` is computed via +Then the partial derivative of a function `J` wrt to `φh` is computed via ```` J(u,φ,dΩ,dΓ_N) = ∫(f(u,φ))dΩ + ∫(g(u,φ))dΓ_N J_iwm = IntegrandWithMeasure(J,(dΩ,dΓ_N)) ∂J∂φh = ∇(J_iwm,[uh,φh],2) ```` -where `f` and `g` are user defined. +where `f` and `g` are user defined. """ function Gridap.gradient(F::IntegrandWithMeasure,uh::Vector{<:FEFunction},K::Int) @check 0 < K <= length(uh) @@ -49,8 +49,8 @@ function Gridap.gradient(F::IntegrandWithMeasure,uh::Vector,K::Int) local_fields = map(local_views,uh) |> to_parray_of_arrays local_measures = map(local_views,F.dΩ) |> to_parray_of_arrays contribs = map(local_measures,local_fields) do dΩ,lf - # TODO: Remove second term below, this is a fix for the problem discussed in - # https://github.com/zjwegert/LSTO_Distributed/issues/46 + # TODO: Remove second term below, this is a fix for the problem discussed in + # https://github.com/zjwegert/GridapTopOpt/issues/46 _f = u -> F.F(lf[1:K-1]...,u,lf[K+1:end]...,dΩ...) #+ ∑(∫(0)dΩ[i] for i = 1:length(dΩ)) return Gridap.Fields.gradient(_f,lf[K]) end @@ -62,7 +62,7 @@ Gridap.gradient(F::IntegrandWithMeasure,uh) = Gridap.gradient(F,[uh],1) """ Gridap.jacobian(F::IntegrandWithMeasure,uh::Vector,K::Int) -Given an an `IntegrandWithMeasure` `F` and a vector of `FEFunctions` or `CellField` `uh` +Given an an `IntegrandWithMeasure` `F` and a vector of `FEFunctions` or `CellField` `uh` (excluding measures) evaluate the Jacobian `F.F` with respect to `uh[K]`. 
""" function Gridap.jacobian(F::IntegrandWithMeasure,uh::Vector{<:Union{FEFunction,CellField}},K::Int) @@ -93,7 +93,7 @@ function GridapDistributed.to_parray_of_arrays(a::NTuple{N,T}) where {N,T<:Debug aj.items[i] end end -end +end function GridapDistributed.to_parray_of_arrays(a::NTuple{N,T}) where {N,T<:MPIArray} indices = linear_indices(first(a)) @@ -130,7 +130,7 @@ end StateParamIntegrandWithMeasure(F::IntegrandWithMeasure,U::FESpace,V_φ::FESpace, U_reg::FESpace,assem_U::Assembler,assem_deriv::Assembler) -Create an instance of `StateParamIntegrandWithMeasure`. +Create an instance of `StateParamIntegrandWithMeasure`. """ function StateParamIntegrandWithMeasure( F::IntegrandWithMeasure, @@ -151,7 +151,7 @@ end """ (u_to_j::StateParamIntegrandWithMeasure)(uh,φh) -Evaluate the `StateParamIntegrandWithMeasure` at parameters `uh` and `φh`. +Evaluate the `StateParamIntegrandWithMeasure` at parameters `uh` and `φh`. """ (u_to_j::StateParamIntegrandWithMeasure)(uh,φh) = sum(u_to_j.F(uh,φh)) @@ -165,7 +165,7 @@ end """ ChainRulesCore.rrule(u_to_j::StateParamIntegrandWithMeasure,uh,φh) -Return the evaluation of a `StateParamIntegrandWithMeasure` and a +Return the evaluation of a `StateParamIntegrandWithMeasure` and a a function for evaluating the pullback of `u_to_j`. This enables compatiblity with `ChainRules.jl` """ @@ -200,7 +200,7 @@ end """ abstract type AbstractFEStateMap -Types inheriting from this abstract type should enable the evaluation and differentiation of +Types inheriting from this abstract type should enable the evaluation and differentiation of the solution to an FE problem `u` that implicitly depends on an auxiliary parameter `φ`. """ abstract type AbstractFEStateMap end @@ -208,7 +208,7 @@ abstract type AbstractFEStateMap end """ get_state(m::AbstractFEStateMap) -Return the solution/state `u` to the FE problem. +Return the solution/state `u` to the FE problem. """ get_state(::AbstractFEStateMap) = @abstractmethod @@ -222,7 +222,7 @@ get_measure(::AbstractFEStateMap) = @abstractmethod """ get_spaces(m::AbstractFEStateMap) -Return a collection of FE spaces. The first four entires should correspond to +Return a collection of FE spaces. The first four entires should correspond to [`get_trial_space`](@ref), [`get_test_space`](@ref), [`get_aux_space`](@ref), and [`get_deriv_space`](@ref) unless these are overloaded for a particular implementation. """ @@ -231,8 +231,8 @@ get_spaces(::AbstractFEStateMap) = @abstractmethod """ get_assemblers(m::AbstractFEStateMap) -Return a collection of assemblers. The first two entires should correspond to -[`get_pde_assembler`](@ref) and [`get_deriv_assembler`](@ref) unless these are +Return a collection of assemblers. The first two entires should correspond to +[`get_pde_assembler`](@ref) and [`get_deriv_assembler`](@ref) unless these are overloaded for a particular implementation. """ get_assemblers(::AbstractFEStateMap) = @abstractmethod @@ -326,7 +326,7 @@ This should solve the linear problem `dRduᵀ*λ = ∂F∂uᵀ`. """ function adjoint_solve!(φ_to_u::AbstractFEStateMap,du::AbstractVector) @abstractmethod -end +end """ dRdφ(φ_to_u::AbstractFEStateMap,uh,vh,φh) @@ -347,7 +347,7 @@ end """ pullback(φ_to_u::AbstractFEStateMap,uh,φh,du;updated) -Compute `∂F∂u*dudφ` at `φh` and `uh` using the adjoint method. I.e., let +Compute `∂F∂u*dudφ` at `φh` and `uh` using the adjoint method. 
I.e., let `∂F∂u*dudφ = -λᵀ*dRdφ` @@ -365,10 +365,10 @@ function pullback(φ_to_u::AbstractFEStateMap,uh,φh,du;updated=false) λh = FEFunction(get_test_space(φ_to_u),λ) ## Compute grad - dudφ_vecdata = collect_cell_vector(U_reg,dRdφ(φ_to_u,uh,λh,φh)) + dudφ_vecdata = collect_cell_vector(U_reg,dRdφ(φ_to_u,uh,λh,φh)) assemble_vector!(dudφ_vec,assem_deriv,dudφ_vecdata) rmul!(dudφ_vec, -1) - + return (NoTangent(),dudφ_vec) end @@ -381,7 +381,7 @@ end """ rrule(φ_to_u::AbstractFEStateMap,φh) -Return the evaluation of a `AbstractFEStateMap` and a +Return the evaluation of a `AbstractFEStateMap` and a a function for evaluating the pullback of `φ_to_u`. This enables compatiblity with `ChainRules.jl` """ @@ -452,7 +452,7 @@ struct AffineFEStateMap{A,B,C,D,E,F} <: AbstractFEStateMap Create an instance of `AffineFEStateMap` given the bilinear form `a` and linear form `l` as `Function` types, trial and test spaces `U` and `V`, the FE space `V_φ` - for `φh`, the FE space `U_reg` for derivatives, and the measures as additional arguments. + for `φh`, the FE space `U_reg` for derivatives, and the measures as additional arguments. Optional arguments enable specification of assemblers and linear solvers. """ @@ -567,9 +567,9 @@ struct NonlinearFEStateMap{A,B,C,D,E,F} <: AbstractFEStateMap adjoint_ls::LinearSolver = LUSolver() ) - Create an instance of `NonlinearFEStateMap` given the residual `res` as a `Function` type, - trial and test spaces `U` and `V`, the FE space `V_φ` for `φh`, the FE space `U_reg` - for derivatives, and the measures as additional arguments. + Create an instance of `NonlinearFEStateMap` given the residual `res` as a `Function` type, + trial and test spaces `U` and `V`, the FE space `V_φ` for `φh`, the FE space `U_reg` + for derivatives, and the measures as additional arguments. Optional arguments enable specification of assemblers, nonlinear solver, and adjoint (linear) solver. """ @@ -605,7 +605,7 @@ struct NonlinearFEStateMap{A,B,C,D,E,F} <: AbstractFEStateMap adjoint_x = allocate_in_domain(adjoint_K); fill!(adjoint_x,zero(eltype(adjoint_x))) adjoint_ns = numerical_setup(symbolic_setup(adjoint_ls,adjoint_K),adjoint_K) adj_caches = (adjoint_ns,adjoint_K,adjoint_x,assem_adjoint) - + A, B, C = typeof(res), typeof(jac), typeof(spaces) D, E, F = typeof(plb_caches), typeof(fwd_caches), typeof(adj_caches) return new{A,B,C,D,E,F}(res,jac,spaces,plb_caches,fwd_caches,adj_caches) @@ -685,16 +685,16 @@ struct RepeatingAffineFEStateMap{A,B,C,D,E,F,G} <: AbstractFEStateMap adjoint_ls::LinearSolver = LUSolver() ) - Create an instance of `RepeatingAffineFEStateMap` given the number of blocks `nblocks`, - a bilinear form `a`, a vector of linear form `l` as `Function` types, the trial and test - spaces `U` and `V`, the FE space `V_φ` for `φh`, the FE space `U_reg` for derivatives, - and the measures as additional arguments. + Create an instance of `RepeatingAffineFEStateMap` given the number of blocks `nblocks`, + a bilinear form `a`, a vector of linear form `l` as `Function` types, the trial and test + spaces `U` and `V`, the FE space `V_φ` for `φh`, the FE space `U_reg` for derivatives, + and the measures as additional arguments. Optional arguments enable specification of assemblers and linear solvers. 
# Note - - The resulting `FEFunction` will be a `MultiFieldFEFunction` (or GridapDistributed equivalent) + - The resulting `FEFunction` will be a `MultiFieldFEFunction` (or GridapDistributed equivalent) where each field corresponds to an entry in the vector of linear forms """ function RepeatingAffineFEStateMap( @@ -790,7 +790,7 @@ repeated_blocks(::ConsecutiveMultiFieldStyle,V0,x::AbstractBlockVector) = blocks function repeated_blocks(::BlockMultiFieldStyle{NB},V0::MultiFieldSpaceTypes,x::AbstractBlockVector) where NB xb = blocks(x) @assert length(xb) % NB == 0 - + nblocks = length(xb) ÷ NB rep_blocks = map(1:nblocks) do iB mortar(xb[(iB-1)*NB+1:iB*NB]) @@ -875,37 +875,37 @@ end """ struct PDEConstrainedFunctionals{N,A} -An object that computes the objective, constraints, and their derivatives. +An object that computes the objective, constraints, and their derivatives. # Implementation -This implementation computes derivatives of a integral quantity +This implementation computes derivatives of a integral quantity -``F(u(\\varphi),\\varphi,\\mathrm{d}\\Omega_1,\\mathrm{d}\\Omega_2,...) = -\\Sigma_{i}\\int_{\\Omega_i} f_i(\\varphi)~\\mathrm{d}\\Omega`` +``F(u(\\varphi),\\varphi,\\mathrm{d}\\Omega_1,\\mathrm{d}\\Omega_2,...) = +\\Sigma_{i}\\int_{\\Omega_i} f_i(\\varphi)~\\mathrm{d}\\Omega`` with respect to an auxiliary parameter ``\\varphi`` where ``u`` -is the solution to a PDE and implicitly depends on ``\\varphi``. +is the solution to a PDE and implicitly depends on ``\\varphi``. This requires two pieces of information: - 1) Computation of ``\\frac{\\partial F}{\\partial u}`` and + 1) Computation of ``\\frac{\\partial F}{\\partial u}`` and ``\\frac{\\partial F}{\\partial \\varphi}`` (handled by [`StateParamIntegrandWithMeasure `](@ref)). 2) Computation of ``\\frac{\\partial F}{\\partial u} - \\frac{\\partial u}{\\partial \\varphi}`` at ``\\varphi`` and ``u`` - using the adjoint method (handled by [`AbstractFEStateMap`](@ref)). I.e., let - + \\frac{\\partial u}{\\partial \\varphi}`` at ``\\varphi`` and ``u`` + using the adjoint method (handled by [`AbstractFEStateMap`](@ref)). I.e., let + ``\\frac{\\partial F}{\\partial u} - \\frac{\\partial u}{\\partial \\varphi} = -\\lambda^\\intercal + \\frac{\\partial u}{\\partial \\varphi} = -\\lambda^\\intercal \\frac{\\partial \\mathcal{R}}{\\partial \\varphi}`` - where ``\\mathcal{R}`` is the residual and solve the (linear) adjoint + where ``\\mathcal{R}`` is the residual and solve the (linear) adjoint problem: - - ``\\frac{\\partial \\mathcal{R}}{\\partial u}^\\intercal\\lambda = + + ``\\frac{\\partial \\mathcal{R}}{\\partial u}^\\intercal\\lambda = \\frac{\\partial F}{\\partial u}^\\intercal.`` -The gradient is then ``\\frac{\\partial F}{\\partial \\varphi} = -\\frac{\\partial F}{\\partial \\varphi} - +The gradient is then ``\\frac{\\partial F}{\\partial \\varphi} = +\\frac{\\partial F}{\\partial \\varphi} - \\frac{\\partial F}{\\partial u}\\frac{\\partial u}{\\partial \\varphi}``. # Parameters @@ -938,7 +938,7 @@ struct PDEConstrainedFunctionals{N,A} Create an instance of `PDEConstrainedFunctionals`. The arguments for the objective and constraints must follow the specification in [`StateParamIntegrandWithMeasure`](@ref). - By default we use automatic differentation for the objective and all constraints. This + By default we use automatic differentation for the objective and all constraints. This can be disabled by passing the shape derivative as a type `Function` to `analytic_dJ` and/or entires in `analytic_dC`. 
""" @@ -952,7 +952,7 @@ struct PDEConstrainedFunctionals{N,A} # Create StateParamIntegrandWithMeasures J = StateParamIntegrandWithMeasure(objective,state_map) C = map(Ci -> StateParamIntegrandWithMeasure(Ci,state_map),constraints) - + # Preallocate dJ = similar(J.caches[2]) dC = map(Ci->similar(Ci.caches[2]),C) @@ -968,7 +968,7 @@ end Create an instance of `PDEConstrainedFunctionals` when the problem has no constraints. """ -PDEConstrainedFunctionals(J::Function,state_map::AbstractFEStateMap;analytic_dJ=nothing) = +PDEConstrainedFunctionals(J::Function,state_map::AbstractFEStateMap;analytic_dJ=nothing) = PDEConstrainedFunctionals(J,Function[],state_map;analytic_dJ = analytic_dJ,analytic_dC = Nothing[]) get_state(m::PDEConstrainedFunctionals) = get_state(m.state_map) @@ -1011,7 +1011,7 @@ end Fields.evaluate!(pcf::PDEConstrainedFunctionals,φh) Evaluate the objective and constraints, and their derivatives at -`φh`. +`φh`. """ function Fields.evaluate!(pcf::PDEConstrainedFunctionals,φh) J, C, dJ, dC = pcf.J,pcf.C,pcf.dJ,pcf.dC @@ -1031,7 +1031,7 @@ function Fields.evaluate!(pcf::PDEConstrainedFunctionals,φh) # Automatic differentation j_val, j_pullback = rrule(F,uh,φh) # Compute functional and pull back _, dFdu, dFdφ = j_pullback(1) # Compute dFdu, dFdφ - _, dφ_adj = u_pullback(dFdu) # Compute -dFdu*dudφ via adjoint + _, dφ_adj = u_pullback(dFdu) # Compute -dFdu*dudφ via adjoint copy!(dF,dφ_adj) dF .+= dFdφ return j_val diff --git a/src/LevelSetTopOpt.jl b/src/GridapTopOpt.jl similarity index 94% rename from src/LevelSetTopOpt.jl rename to src/GridapTopOpt.jl index 43f948f7..c0c0dbea 100644 --- a/src/LevelSetTopOpt.jl +++ b/src/GridapTopOpt.jl @@ -1,4 +1,4 @@ -module LevelSetTopOpt +module GridapTopOpt using GridapPETSc, GridapPETSc.PETSC using GridapPETSc: PetscScalar, PetscInt, PETSC, @check_error_code @@ -19,7 +19,7 @@ using Gridap.FESpaces: get_assembly_strategy using Gridap: writevtk using GridapDistributed -using GridapDistributed: DistributedDiscreteModel, DistributedTriangulation, +using GridapDistributed: DistributedDiscreteModel, DistributedTriangulation, DistributedFESpace, DistributedDomainContribution, to_parray_of_arrays, allocate_in_domain, DistributedCellField, DistributedMultiFieldCellField, DistributedMultiFieldFEBasis, BlockPMatrix, BlockPVector, change_ghost diff --git a/src/Optimisers/Optimisers.jl b/src/Optimisers/Optimisers.jl index c95aa097..e5e31537 100644 --- a/src/Optimisers/Optimisers.jl +++ b/src/Optimisers/Optimisers.jl @@ -1,8 +1,8 @@ """ abstract type Optimiser -Optimisers in LevelSetTopOpt.jl are implemented as iterators. -Your own optimiser can be implemented by implementing +Optimisers in GridapTopOpt.jl are implemented as iterators. +Your own optimiser can be implemented by implementing concrete functionality of the below. """ abstract type Optimiser end @@ -43,7 +43,7 @@ get_history(::Optimiser) :: OptimiserHistory = @abstractmethod """ converged(::Optimiser) -Return a `Bool` that is true if the `Optimiser` has +Return a `Bool` that is true if the `Optimiser` has converged, otherwise false. """ function converged(::Optimiser) :: Bool @@ -59,7 +59,7 @@ function finished(m::Optimiser) :: Bool return A || B end -function print_msg(opt::Optimiser,msg::String;kwargs...) +function print_msg(opt::Optimiser,msg::String;kwargs...) print_msg(get_history(opt),msg;kwargs...) end @@ -203,7 +203,7 @@ end """ write_history(path::String,h::OptimiserHistory;ranks=nothing) - + Write the contents of an `OptimiserHistory` object to a `path`. 
Provide MPI `ranks` when running in parallel. """ @@ -211,7 +211,7 @@ function write_history(path::String,h::OptimiserHistory;ranks=nothing) if i_am_main(ranks) open(path,"w") do f write(f,h) - end + end end end @@ -224,7 +224,7 @@ end """ struct OptimiserHistorySlice{T} end -A read-only wrapper of OptimiserHistory for IO display +A read-only wrapper of OptimiserHistory for IO display of iteration history at a specific iteration. """ struct OptimiserHistorySlice{T} diff --git a/test/seq/InverseHomogenisationALMTests.jl b/test/seq/InverseHomogenisationALMTests.jl index 82929b7d..b0b79d4a 100644 --- a/test/seq/InverseHomogenisationALMTests.jl +++ b/test/seq/InverseHomogenisationALMTests.jl @@ -1,7 +1,7 @@ module InverseHomogenisationALMTests using Test -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Maximum bulk modulus inverse homogenisation with augmented Lagrangian method in 2D. @@ -10,9 +10,9 @@ using Gridap, LevelSetTopOpt Min J(Ω) = -κ(Ω) Ω s.t., Vol(Ω) = vf, - ⎡For unique εᴹᵢ, find uᵢ∈V=H¹ₚₑᵣ(Ω)ᵈ, + ⎡For unique εᴹᵢ, find uᵢ∈V=H¹ₚₑᵣ(Ω)ᵈ, ⎣∫ ∑ᵢ C ⊙ ε(uᵢ) ⊙ ε(vᵢ) dΩ = ∫ -∑ᵢ C ⊙ ε⁰ᵢ ⊙ ε(vᵢ) dΩ, ∀v∈V. -""" +""" function main(;AD) ## Parameters order = 1 @@ -85,7 +85,7 @@ function main(;AD) α = α_coeff*maximum(el_Δ) a_hilb(p,q) =∫(α^2*∇(p)⋅∇(q) + p*q)dΩ vel_ext = VelocityExtension(a_hilb,U_reg,V_reg) - + ## Optimiser optimiser = AugmentedLagrangian(pcfs,ls_evo,vel_ext,φh; γ,γ_reinit,verbose=true,constraint_names=[:Vol]) diff --git a/test/seq/InverterHPMTests.jl b/test/seq/InverterHPMTests.jl index ce94a389..06fffac5 100644 --- a/test/seq/InverterHPMTests.jl +++ b/test/seq/InverterHPMTests.jl @@ -1,7 +1,7 @@ module InverterHPMTests using Test -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Inverter mechanism with Hilbertian projection method in 2D. @@ -10,13 +10,13 @@ using Gridap, LevelSetTopOpt Min J(Ω) = ηᵢₙ*∫ u⋅e₁ dΓᵢₙ/Vol(Γᵢₙ) Ω s.t., Vol(Ω) = vf, - C(Ω) = 0, - ⎡u∈V=H¹(Ω;u(Γ_D)=0)ᵈ, + C(Ω) = 0, + ⎡u∈V=H¹(Ω;u(Γ_D)=0)ᵈ, ⎣∫ C ⊙ ε(u) ⊙ ε(v) dΩ + ∫ kₛv⋅u dΓₒᵤₜ = ∫ v⋅g dΓᵢₙ , ∀v∈V. - + where C(Ω) = ∫ -u⋅e₁-δₓ dΓₒᵤₜ/Vol(Γₒᵤₜ). We assume symmetry in the problem to aid convergence. -""" +""" function main() ## Parameters order = 1 @@ -33,7 +33,7 @@ function main() δₓ = 0.2 ks = 0.1 g = VectorValue(0.5,0) - + ## FE Setup model = CartesianDiscreteModel(dom,el_size) el_Δ = get_el_Δ(model) @@ -97,7 +97,7 @@ function main() α = α_coeff*maximum(el_Δ) a_hilb(p,q) = ∫(α^2*∇(p)⋅∇(q) + p*q)dΩ; vel_ext = VelocityExtension(a_hilb,U_reg,V_reg) - + ## Optimiser optimiser = HilbertianProjection(pcfs,ls_evo,vel_ext,φh; γ,γ_reinit,verbose=true,debug=true,constraint_names=[:Vol,:UΓ_out]) diff --git a/test/seq/NonlinearThermalComplianceALMTests.jl b/test/seq/NonlinearThermalComplianceALMTests.jl index c624d997..c4808705 100644 --- a/test/seq/NonlinearThermalComplianceALMTests.jl +++ b/test/seq/NonlinearThermalComplianceALMTests.jl @@ -1,7 +1,7 @@ module NonlinearThermalComplianceALMTests using Test -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum thermal compliance with augmented Lagrangian method in 2D with nonlinear diffusivity. @@ -12,9 +12,9 @@ using Gridap, LevelSetTopOpt s.t., Vol(Ω) = vf, ⎡u∈V=H¹(Ω;u(Γ_D)=0), ⎣∫ κ(u)*∇(u)⋅∇(v) dΩ = ∫ v dΓ_N, ∀v∈V. 
- + In this example κ(u) = κ0*(exp(ξ*u)) -""" +""" function main() ## Parameters order = 1 @@ -34,9 +34,9 @@ function main() ## FE Setup model = CartesianDiscreteModel(dom,el_size); el_Δ = get_el_Δ(model) - f_Γ_D(x) = (x[1] ≈ 0.0 && (x[2] <= ymax*prop_Γ_D + eps() || + f_Γ_D(x) = (x[1] ≈ 0.0 && (x[2] <= ymax*prop_Γ_D + eps() || x[2] >= ymax-ymax*prop_Γ_D - eps())); - f_Γ_N(x) = (x[1] ≈ xmax && ymax/2-ymax*prop_Γ_N/2 - eps() <= x[2] <= + f_Γ_N(x) = (x[1] ≈ xmax && ymax/2-ymax*prop_Γ_N/2 - eps() <= x[2] <= ymax/2+ymax*prop_Γ_N/2 + eps()); update_labels!(1,model,f_Γ_D,"Gamma_D") update_labels!(2,model,f_Γ_N,"Gamma_N") @@ -84,7 +84,7 @@ function main() α = α_coeff*maximum(el_Δ) a_hilb(p,q) =∫(α^2*∇(p)⋅∇(q) + p*q)dΩ vel_ext = VelocityExtension(a_hilb,U_reg,V_reg) - + ## Optimiser optimiser = AugmentedLagrangian(pcfs,ls_evo,vel_ext,φh; γ,γ_reinit,verbose=true,constraint_names=[:Vol]) diff --git a/test/seq/PZMultiFieldRepeatingStateTests.jl b/test/seq/PZMultiFieldRepeatingStateTests.jl index 827f9c91..06db9bb2 100644 --- a/test/seq/PZMultiFieldRepeatingStateTests.jl +++ b/test/seq/PZMultiFieldRepeatingStateTests.jl @@ -1,8 +1,8 @@ module PZMultiFieldRepeatingStateTests using Test -using Gridap, GridapDistributed, GridapPETSc, GridapSolvers, - PartitionedArrays, LevelSetTopOpt, SparseMatricesCSR +using Gridap, GridapDistributed, GridapPETSc, GridapSolvers, + PartitionedArrays, GridapTopOpt, SparseMatricesCSR using Gridap.TensorValues, Gridap.Helpers @@ -54,8 +54,8 @@ function main(;AD) ## Material tensors C, e, κ = PZT5A_2D(); - k0 = norm(C.data,Inf); - α0 = norm(e.data,Inf); + k0 = norm(C.data,Inf); + α0 = norm(e.data,Inf); β0 = norm(κ.data,Inf); γ0 = β0*k0/α0^2; @@ -66,9 +66,9 @@ function main(;AD) Eⁱ = (VectorValue(1.0,0.0,), VectorValue(0.0,1.0)) - a((u,ϕ),(v,q),φ,dΩ) = ∫((I ∘ φ) * (1/k0*((C ⊙ ε(u)) ⊙ ε(v)) - + a((u,ϕ),(v,q),φ,dΩ) = ∫((I ∘ φ) * (1/k0*((C ⊙ ε(u)) ⊙ ε(v)) - 1/α0*((-∇(ϕ) ⋅ e) ⊙ ε(v)) + - -1/α0*((e ⋅² ε(u)) ⋅ -∇(q)) + + -1/α0*((e ⋅² ε(u)) ⋅ -∇(q)) + -γ0/β0*((κ ⋅ -∇(ϕ)) ⋅ -∇(q))) )dΩ; l_ε = [((v,q),φ,dΩ) -> ∫(((I ∘ φ) * (-C ⊙ εᴹ[i] ⊙ ε(v) + k0/α0*(e ⋅² εᴹ[i]) ⋅ -∇(q))))dΩ for i = 1:3]; @@ -84,9 +84,9 @@ function main(;AD) u_r = uϕ[2r-1]; ϕ_r = uϕ[2r] u_s = uϕ[2s-1]; ϕ_s = uϕ[2s] ∫(- 1/k0 * q * ( - (C ⊙ (1/k0*ε(u_s) + εᴹ[s])) ⊙ (1/k0*ε(u_r) + εᴹ[r]) - + (C ⊙ (1/k0*ε(u_s) + εᴹ[s])) ⊙ (1/k0*ε(u_r) + εᴹ[r]) - (-1/α0*∇(ϕ_s) ⋅ e) ⊙ (1/k0*ε(u_r) + εᴹ[r]) - - (e ⋅² (1/k0*ε(u_s) + εᴹ[s])) ⋅ (-1/α0*∇(ϕ_r)) - + (e ⋅² (1/k0*ε(u_s) + εᴹ[s])) ⋅ (-1/α0*∇(ϕ_r)) - (κ ⋅ (-1/α0*∇(ϕ_s))) ⋅ (-1/α0*∇(ϕ_r)) ) * (DH ∘ φ) * (norm ∘ ∇(φ)) )dΩ; diff --git a/test/seq/ThermalComplianceALMTests.jl b/test/seq/ThermalComplianceALMTests.jl index c96225f8..fcf3704d 100644 --- a/test/seq/ThermalComplianceALMTests.jl +++ b/test/seq/ThermalComplianceALMTests.jl @@ -1,7 +1,7 @@ module ThermalComplianceALMTests using Test -using Gridap, LevelSetTopOpt +using Gridap, GridapTopOpt """ (Serial) Minimum thermal compliance with augmented Lagrangian method in 2D. @@ -12,7 +12,7 @@ using Gridap, LevelSetTopOpt s.t., Vol(Ω) = vf, ⎡u∈V=H¹(Ω;u(Γ_D)=0), ⎣∫ κ*∇(u)⋅∇(v) dΩ = ∫ v dΓ_N, ∀v∈V. 
-""" +""" function main(;order,AD) ## Parameters xmax = ymax = 1.0 @@ -32,9 +32,9 @@ function main(;order,AD) ## FE Setup model = CartesianDiscreteModel(dom,el_size); el_Δ = get_el_Δ(model) - f_Γ_D(x) = (x[1] ≈ 0.0 && (x[2] <= ymax*prop_Γ_D + eps() || + f_Γ_D(x) = (x[1] ≈ 0.0 && (x[2] <= ymax*prop_Γ_D + eps() || x[2] >= ymax-ymax*prop_Γ_D - eps())) - f_Γ_N(x) = (x[1] ≈ xmax && ymax/2-ymax*prop_Γ_N/2 - eps() <= x[2] <= + f_Γ_N(x) = (x[1] ≈ xmax && ymax/2-ymax*prop_Γ_N/2 - eps() <= x[2] <= ymax/2+ymax*prop_Γ_N/2 + eps()) update_labels!(1,model,f_Γ_D,"Gamma_D") update_labels!(2,model,f_Γ_N,"Gamma_N") @@ -85,7 +85,7 @@ function main(;order,AD) α = α_coeff*maximum(el_Δ) a_hilb(p,q) =∫(α^2*∇(p)⋅∇(q) + p*q)dΩ; vel_ext = VelocityExtension(a_hilb,U_reg,V_reg) - + ## Optimiser optimiser = AugmentedLagrangian(pcfs,ls_evo,vel_ext,φh; γ,γ_reinit,verbose=true,constraint_names=[:Vol]) @@ -105,7 +105,7 @@ end s.t., Vol(Ω) = vf, ⎡u∈V=H¹(Ω;u(Γ_D)=0), ⎣∫ κ*∇(u)⋅∇(v) dΩ = ∫ v dΓ_N, ∀v∈V. -""" +""" function main_3d(;order) ## Parameters xmax = ymax = zmax = 1.0 @@ -174,7 +174,7 @@ function main_3d(;order) α = α_coeff*maximum(el_Δ) a_hilb(p,q) =∫(α^2*∇(p)⋅∇(q) + p*q)dΩ; vel_ext = VelocityExtension(a_hilb,U_reg,V_reg) - + ## Optimiser optimiser = AugmentedLagrangian(pcfs,ls_evo,vel_ext,φh; γ,γ_reinit,verbose=true,constraint_names=[:Vol])