Added MPI tests and cleaned up Solvers.jl
zjwegert committed May 14, 2024
1 parent b1efa91 commit 25cff26
Showing 10 changed files with 1,093 additions and 85 deletions.
2 changes: 0 additions & 2 deletions src/GridapTopOpt.jl
@@ -61,8 +61,6 @@ export reinit!

include("Solvers.jl")
export ElasticitySolver
-export BlockDiagonalPreconditioner
-export MUMPSSolver

include("VelocityExtension.jl")
export VelocityExtension
87 changes: 5 additions & 82 deletions src/Solvers.jl
@@ -1,4 +1,4 @@
-## TODO: @Jordi, are these going into GridapSolvers or should I write documentation?
+## This will be moved into GridapSolvers in a future release

struct ElasticitySolver{A} <: LinearSolver
space ::A
@@ -31,7 +31,7 @@ function get_dof_coordinates(space::GridapDistributed.DistributedSingleFieldFESpace)
owner = part_id(dof_indices)
own_indices = OwnIndices(ngdofs,owner,own_to_global(dof_indices))
ghost_indices = GhostIndices(ngdofs,Int64[],Int32[]) # We only consider owned dofs
-OwnAndGhostIndices(own_indices,ghost_indices)
+OwnAndGhostIndices(own_indices,ghost_indices)
end
return PVector(coords,indices)
end
@@ -126,7 +126,7 @@ _num_dims(space::GridapDistributed.DistributedSingleFieldFESpace) = getany(map(_
function Gridap.Algebra.numerical_setup(ss::ElasticitySymbolicSetup,_A::PSparseMatrix)
s = ss.solver

-# Create ns
+# Create ns
A = convert(PETScMatrix,_A)
X = convert(PETScVector,allocate_in_domain(_A))
B = convert(PETScVector,allocate_in_domain(_A))
Expand All @@ -139,7 +139,7 @@ function Gridap.Algebra.numerical_setup(ss::ElasticitySymbolicSetup,_A::PSparseM
# Create matrix nullspace
@check_error_code GridapPETSc.PETSC.MatNullSpaceCreateRigidBody(dof_coords.vec[],ns.null)
@check_error_code GridapPETSc.PETSC.MatSetNearNullSpace(ns.A.mat[],ns.null[])

# Setup solver and preconditioner
@check_error_code GridapPETSc.PETSC.KSPCreate(ns.A.comm,ns.ksp)
@check_error_code GridapPETSc.PETSC.KSPSetOperators(ns.ksp[],ns.A.mat[],ns.A.mat[])
Expand All @@ -162,81 +162,4 @@ function Algebra.solve!(x::AbstractVector{PetscScalar},ns::ElasticityNumericalSe
@check_error_code GridapPETSc.PETSC.KSPSolve(ns.ksp[],B.vec[],X.vec[])
copy!(x,X)
return x
end

-# MUMPS solver
-
-function mumps_ksp_setup(ksp)
-pc = Ref{GridapPETSc.PETSC.PC}()
-mumpsmat = Ref{GridapPETSc.PETSC.Mat}()
-@check_error_code GridapPETSc.PETSC.KSPView(ksp[],C_NULL)
-@check_error_code GridapPETSc.PETSC.KSPSetType(ksp[],GridapPETSc.PETSC.KSPPREONLY)
-@check_error_code GridapPETSc.PETSC.KSPGetPC(ksp[],pc)
-@check_error_code GridapPETSc.PETSC.PCSetType(pc[],GridapPETSc.PETSC.PCLU)
-@check_error_code GridapPETSc.PETSC.PCFactorSetMatSolverType(pc[],GridapPETSc.PETSC.MATSOLVERMUMPS)
-@check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[])
-@check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat)
-@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 1) # Level of printing
-@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 7, 0) # Perm type
-# @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 14, 1000)
-@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2) # Seq or parallel analysis
-@check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2) # Parallel ordering
-@check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6) # Absolute pivoting threshold
-end
-
-function MUMPSSolver()
-return PETScLinearSolver(mumps_ksp_setup)
-end
-
-# Block diagonal preconditioner
-
-struct BlockDiagonalPreconditioner{N,A} <: Gridap.Algebra.LinearSolver
-solvers :: A
-function BlockDiagonalPreconditioner(solvers::AbstractArray{<:Gridap.Algebra.LinearSolver})
-N = length(solvers)
-A = typeof(solvers)
-return new{N,A}(solvers)
-end
-end
-
-struct BlockDiagonalPreconditionerSS{A,B} <: Gridap.Algebra.SymbolicSetup
-solver :: A
-block_ss :: B
-end
-
-function Gridap.Algebra.symbolic_setup(solver::BlockDiagonalPreconditioner,mat::AbstractBlockMatrix)
-mat_blocks = diag(blocks(mat))
-block_ss = map(symbolic_setup,solver.solvers,mat_blocks)
-return BlockDiagonalPreconditionerSS(solver,block_ss)
-end
-
-struct BlockDiagonalPreconditionerNS{A,B} <: Gridap.Algebra.NumericalSetup
-solver :: A
-block_ns :: B
-end
-
-function Gridap.Algebra.numerical_setup(ss::BlockDiagonalPreconditionerSS,mat::AbstractBlockMatrix)
-solver = ss.solver
-mat_blocks = diag(blocks(mat))
-block_ns = map(numerical_setup,ss.block_ss,mat_blocks)
-return BlockDiagonalPreconditionerNS(solver,block_ns)
-end
-
-function Gridap.Algebra.numerical_setup!(ns::BlockDiagonalPreconditionerNS,mat::AbstractBlockMatrix)
-mat_blocks = diag(blocks(mat))
-map(numerical_setup!,ns.block_ns,mat_blocks)
-end
-
-function Gridap.Algebra.solve!(x::AbstractBlockVector,ns::BlockDiagonalPreconditionerNS,b::AbstractBlockVector)
-@check blocklength(x) == blocklength(b) == length(ns.block_ns)
-for (iB,bns) in enumerate(ns.block_ns)
-xi = x[Block(iB)]
-bi = b[Block(iB)]
-solve!(xi,bns,bi)
-end
-return x
-end
-
-function LinearAlgebra.ldiv!(x,ns::BlockDiagonalPreconditionerNS,b)
-solve!(x,ns,b)
-end
-end
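Aside: the retained ElasticitySolver plugs into Gridap's generic LinearSolver protocol — symbolic setup, then numerical setup, then solve! — which is exactly the sequence of methods defined in the diff above. A minimal serial sketch of that protocol using the stock LUSolver; ElasticitySolver itself additionally needs a distributed FE space and PETSc, so this illustrates the workflow only, not this commit's solver:

# Minimal illustration of Gridap's LinearSolver protocol, which the
# ElasticitySolver above implements. Uses the built-in LUSolver on a
# dense 2x2 system; nothing here is specific to this commit.
using Gridap.Algebra

A = [4.0 1.0; 1.0 3.0]
b = [1.0, 2.0]
x = zeros(2)

solver = LUSolver()
ss = symbolic_setup(solver, A)   # analyse the matrix structure
ns = numerical_setup(ss, A)      # factorise
solve!(x, ns, b)                 # x now holds A \ b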
108 changes: 108 additions & 0 deletions test/mpi/InverseHomogenisationALMTests.jl
@@ -0,0 +1,108 @@
module InverseHomogenisationALMTestsMPI
using Test

using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers,
PartitionedArrays, GridapTopOpt, SparseMatricesCSR

"""
(MPI) Maximum bulk modulus inverse homogenisation with augmented Lagrangian method in 2D.
Optimisation problem:
Min J(Ω) = -κ(Ω)
Ω
s.t., Vol(Ω) = vf,
⎡For unique εᴹᵢ, find uᵢ∈V=H¹ₚₑᵣ(Ω)ᵈ,
⎣∫ ∑ᵢ C ⊙ ε(uᵢ) ⊙ ε(vᵢ) dΩ = ∫ -∑ᵢ C ⊙ ε⁰ᵢ ⊙ ε(vᵢ) dΩ, ∀v∈V.
"""
function main(distribute,mesh_partition;AD)
ranks = distribute(LinearIndices((prod(mesh_partition),)))
## Parameters
order = 1
xmax,ymax=(1.0,1.0)
dom = (0,xmax,0,ymax)
el_size = (10,10)
γ = 0.05
γ_reinit = 0.5
max_steps = floor(Int,order*minimum(el_size)/10)
tol = 1/(5order^2)/minimum(el_size)
C = isotropic_elast_tensor(2,1.,0.3)
η_coeff = 2
α_coeff = 4max_steps*γ
vf = 0.5

## FE Setup
model = CartesianDiscreteModel(ranks,mesh_partition,dom,el_size,isperiodic=(true,true))
el_Δ = get_el_Δ(model)
f_Γ_D(x) = iszero(x)
update_labels!(1,model,f_Γ_D,"origin")

## Triangulations and measures
Ω = Triangulation(model)
dΩ = Measure(Ω,2*order)
vol_D = sum(∫(1)dΩ)

## Spaces
reffe = ReferenceFE(lagrangian,VectorValue{2,Float64},order)
reffe_scalar = ReferenceFE(lagrangian,Float64,order)
V = TestFESpace(model,reffe;dirichlet_tags=["origin"])
U = TrialFESpace(V,VectorValue(0.0,0.0))
V_reg = V_φ = TestFESpace(model,reffe_scalar)
U_reg = TrialFESpace(V_reg)

## Create FE functions
lsf_fn = x->max(initial_lsf(2,0.4)(x),initial_lsf(2,0.4;b=VectorValue(0,0.5))(x))
φh = interpolate(lsf_fn,V_φ)

## Interpolation and weak form
interp = SmoothErsatzMaterialInterpolation(η = η_coeff*maximum(el_Δ))
I,H,DH,ρ = interp.I,interp.H,interp.DH,interp.ρ

εᴹ = (TensorValue(1.,0.,0.,0.), # ϵᵢⱼ⁽¹¹⁾≡ϵᵢⱼ⁽¹⁾
TensorValue(0.,0.,0.,1.), # ϵᵢⱼ⁽²²⁾≡ϵᵢⱼ⁽²⁾
TensorValue(0.,1/2,1/2,0.)) # ϵᵢⱼ⁽¹²⁾≡ϵᵢⱼ⁽³⁾

a(u,v,φ,dΩ) = ∫((I ∘ φ) * C ⊙ ε(u) ⊙ ε(v))dΩ
l = [(v,φ,dΩ) -> ∫(-(I ∘ φ) * C ⊙ εᴹ[i] ⊙ ε(v))dΩ for i in 1:3]

## Optimisation functionals
Cᴴ(r,s,u,φ,dΩ) = ∫((I ∘ φ)*(C ⊙ (ε(u[r])+εᴹ[r]) ⊙ εᴹ[s]))dΩ
dCᴴ(r,s,q,u,φ,dΩ) = ∫(-q*(C ⊙ (ε(u[r])+εᴹ[r]) ⊙ (ε(u[s])+εᴹ[s]))*(DH ∘ φ)*(norm ∘ ∇(φ)))dΩ
κ(u,φ,dΩ) = -1/4*(Cᴴ(1,1,u,φ,dΩ)+Cᴴ(2,2,u,φ,dΩ)+2*Cᴴ(1,2,u,φ,dΩ))
dκ(q,u,φ,dΩ) = -1/4*(dCᴴ(1,1,q,u,φ,dΩ)+dCᴴ(2,2,q,u,φ,dΩ)+2*dCᴴ(1,2,q,u,φ,dΩ))
Vol(u,φ,dΩ) = ∫(((ρ ∘ φ) - vf)/vol_D)dΩ
dVol(q,u,φ,dΩ) = ∫(-1/vol_D*q*(DH ∘ φ)*(norm ∘ ∇(φ)))dΩ

## Finite difference solver and level set function
ls_evo = HamiltonJacobiEvolution(FirstOrderStencil(2,Float64),model,V_φ,tol,max_steps)

## Setup solver and FE operators
state_map = RepeatingAffineFEStateMap(3,a,l,U,V,V_φ,U_reg,φh,dΩ)
pcfs = if AD
PDEConstrainedFunctionals(κ,[Vol],state_map;analytic_dJ=dκ,analytic_dC=[dVol])
else
PDEConstrainedFunctionals(κ,[Vol],state_map)
end

## Hilbertian extension-regularisation problems
α = α_coeff*maximum(el_Δ)
a_hilb(p,q) = ∫(α^2*∇(p)⋅∇(q) + p*q)dΩ
vel_ext = VelocityExtension(a_hilb,U_reg,V_reg)

## Optimiser
optimiser = AugmentedLagrangian(pcfs,ls_evo,vel_ext,φh;
γ,γ_reinit,verbose=i_am_main(ranks),constraint_names=[:Vol])

# Do a few iterations
vars, state = iterate(optimiser)
vars, state = iterate(optimiser,state)
true
end

# Test that these run successfully
with_mpi() do distribute
@test main(distribute,(2,2);AD=true)
@test main(distribute,(2,2);AD=false)
end


end # module
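A note on running this test: with_mpi expects the script to be launched under MPI with as many ranks as the mesh partition, here prod((2,2)) = 4. A hedged sketch of one way to launch it through MPI.jl's mpiexec wrapper — the project flag and file path are assumptions based on the repository layout shown in this commit:

# Hypothetical launcher for the MPI test above; assumes MPI.jl is
# available and that 4 ranks match the (2,2) partition used in the test.
using MPI

MPI.mpiexec() do cmd
  run(`$cmd -n 4 $(Base.julia_cmd()) --project=. test/mpi/InverseHomogenisationALMTests.jl`)
end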
118 changes: 118 additions & 0 deletions test/mpi/InverterHPMTests.jl
@@ -0,0 +1,118 @@
module InverterHPMTestsMPI
using Test

using Gridap, Gridap.MultiField, GridapDistributed, GridapPETSc, GridapSolvers,
PartitionedArrays, GridapTopOpt, SparseMatricesCSR

"""
(MPI) Inverter mechanism with Hilbertian projection method in 2D.
Optimisation problem:
Min J(Ω) = ηᵢₙ*∫ u⋅e₁ dΓᵢₙ/Vol(Γᵢₙ)
Ω
s.t., Vol(Ω) = vf,
C(Ω) = 0,
⎡u∈V=H¹(Ω;u(Γ_D)=0)ᵈ,
⎣∫ C ⊙ ε(u) ⊙ ε(v) dΩ + ∫ kₛv⋅u dΓₒᵤₜ = ∫ v⋅g dΓᵢₙ , ∀v∈V.
where C(Ω) = ∫ -u⋅e₁-δₓ dΓₒᵤₜ/Vol(Γₒᵤₜ). We assume symmetry in the problem to aid
convergence.
"""
function main(distribute,mesh_partition)
ranks = distribute(LinearIndices((prod(mesh_partition),)))
## Parameters
order = 1
dom = (0,1,0,0.5)
el_size = (10,10)
γ = 0.1
γ_reinit = 0.5
max_steps = floor(Int,order*minimum(el_size)/10)
tol = 1/(5order^2)/minimum(el_size)
C = isotropic_elast_tensor(2,1.0,0.3)
η_coeff = 2
α_coeff = 4max_steps*γ
vf = 0.4
δₓ = 0.2
ks = 0.1
g = VectorValue(0.5,0)

## FE Setup
model = CartesianDiscreteModel(ranks,mesh_partition,dom,el_size)
el_Δ = get_el_Δ(model)
f_Γ_in(x) = (x[1] ≈ 0.0) && (x[2] <= 0.03 + eps())
f_Γ_out(x) = (x[1] ≈ 1.0) && (x[2] <= 0.07 + eps())
f_Γ_D(x) = (x[1] ≈ 0.0) && (x[2] >= 0.4)
f_Γ_D2(x) = (x[2] ≈ 0.0)
update_labels!(1,model,f_Γ_in,"Gamma_in")
update_labels!(2,model,f_Γ_out,"Gamma_out")
update_labels!(3,model,f_Γ_D,"Gamma_D")
update_labels!(4,model,f_Γ_D2,"SymLine")

## Triangulations and measures
Ω = Triangulation(model)
Γ_in = BoundaryTriangulation(model,tags="Gamma_in")
Γ_out = BoundaryTriangulation(model,tags="Gamma_out")
dΩ = Measure(Ω,2order)
dΓ_in = Measure(Γ_in,2order)
dΓ_out = Measure(Γ_out,2order)
vol_D = sum(∫(1)dΩ)
vol_Γ_in = sum(∫(1)dΓ_in)
vol_Γ_out = sum(∫(1)dΓ_out)

## Spaces
reffe = ReferenceFE(lagrangian,VectorValue{2,Float64},order)
reffe_scalar = ReferenceFE(lagrangian,Float64,order)
V = TestFESpace(model,reffe;dirichlet_tags=["Gamma_D","SymLine"],
dirichlet_masks=[(true,true),(false,true)])
U = TrialFESpace(V,[VectorValue(0.0,0.0),VectorValue(0.0,0.0)])
V_φ = TestFESpace(model,reffe_scalar)
V_reg = TestFESpace(model,reffe_scalar;dirichlet_tags=["Gamma_in","Gamma_out"])
U_reg = TrialFESpace(V_reg,[0,0])

## Create FE functions
lsf_fn(x) = min(max(initial_lsf(6,0.2)(x),-sqrt((x[1]-1)^2+(x[2]-0.5)^2)+0.2),
sqrt((x[1])^2+(x[2]-0.5)^2)-0.1)
φh = interpolate(lsf_fn,V_φ)

## Interpolation and weak form
interp = SmoothErsatzMaterialInterpolation(η = η_coeff*maximum(el_Δ))
I,H,DH,ρ = interp.I,interp.H,interp.DH,interp.ρ

a(u,v,φ,dΩ,dΓ_in,dΓ_out) = ∫((I ∘ φ)*(C ⊙ ε(u) ⊙ ε(v)))dΩ + ∫(ks*(u⋅v))dΓ_out
l(v,φ,dΩ,dΓ_in,dΓ_out) = ∫(v⋅g)dΓ_in

## Optimisation functionals
e₁ = VectorValue(1,0)
J(u,φ,dΩ,dΓ_in,dΓ_out) = ∫((u⋅e₁)/vol_Γ_in)dΓ_in
Vol(u,φ,dΩ,dΓ_in,dΓ_out) = ∫(((ρ ∘ φ) - vf)/vol_D)dΩ
dVol(q,u,φ,dΩ,dΓ_in,dΓ_out) = ∫(-1/vol_D*q*(DH ∘ φ)*(norm ∘ ∇(φ)))dΩ
UΓ_out(u,φ,dΩ,dΓ_in,dΓ_out) = ∫((u⋅-e₁-δₓ)/vol_Γ_out)dΓ_out

## Finite difference solver and level set function
ls_evo = HamiltonJacobiEvolution(FirstOrderStencil(2,Float64),model,V_φ,tol,max_steps)

## Setup solver and FE operators
state_map = AffineFEStateMap(a,l,U,V,V_φ,U_reg,φh,dΩ,dΓ_in,dΓ_out)
pcfs = PDEConstrainedFunctionals(J,[Vol,UΓ_out],state_map,analytic_dC=[dVol,nothing])

## Hilbertian extension-regularisation problems
α = α_coeff*maximum(el_Δ)
a_hilb(p,q) = ∫(α^2*∇(p)⋅∇(q) + p*q)dΩ;
vel_ext = VelocityExtension(a_hilb,U_reg,V_reg)

## Optimiser
optimiser = HilbertianProjection(pcfs,ls_evo,vel_ext,φh;
γ,γ_reinit,verbose=i_am_main(ranks),debug=i_am_main(ranks),constraint_names=[:Vol,:UΓ_out])

# Do a few iterations
vars, state = iterate(optimiser)
vars, state = iterate(optimiser,state)
true
end

# Test that these run successfully
with_mpi() do distribute
@test main(distribute,(2,2))
end

end # module
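Both test modules stop after two explicit iterate calls, which is enough to smoke-test the MPI pipeline. A production run would instead drain the iterator to convergence; a minimal sketch, assuming only Julia's iteration protocol, which the manual calls above already rely on:

# Hedged sketch of a full optimisation run, in place of the two manual
# steps above. Stopping criteria are internal to the optimiser; the loop
# simply advances it until the iterator is exhausted.
result = iterate(optimiser)
while result !== nothing
  vars, state = result
  result = iterate(optimiser, state)
end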