Commit

Merge pull request #754 from kinnala/add-adaptive-tetref
Add adaptive tet refinement
kinnala authored Sep 27, 2021
2 parents 9241274 + 1e67a45 commit d71b4ab
Showing 4 changed files with 253 additions and 1 deletion.
1 change: 1 addition & 0 deletions README.md
@@ -279,6 +279,7 @@ with respect to documented and/or tested features.
- Added: `ElementTri15ParamPlate`, 15-parameter nonconforming triangular element for plates
- Added: `ElementTriBDM1`, the lowest order Brezzi-Douglas-Marini element
- Added: `Mesh.draw().show()` will now visualize any mesh interactively (requires [vedo](https://vedo.embl.es/))
- Added: Adaptive refinement for `MeshTet1`
- Fixed: `MappingIsoparametric` is now about 2x faster for large meshes thanks
to additional caching
- Fixed: `MeshHex2.save` did not work properly
20 changes: 19 additions & 1 deletion skfem/mesh/mesh.py
@@ -433,12 +433,30 @@ def __matmul__(self, other):
            ]
        raise NotImplementedError

    def is_valid(self) -> bool:
        """Perform some mesh validation checks."""
        # check that there are no duplicate points
        tmp = np.ascontiguousarray(self.p.T)
        if self.p.shape[1] != np.unique(tmp.view([('', tmp.dtype)]
                                                 * tmp.shape[1])).shape[0]:
            warn("Mesh contains duplicate vertices.")
            return False

        # check that all points are at least in some element
        if len(np.setdiff1d(np.arange(self.p.shape[1]),
                            np.unique(self.t))) > 0:
            warn("Mesh contains a vertex not belonging to any element.")
            return False

        return True
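
A minimal sketch of how the new check behaves, assuming the `MeshTet` constructor accepts an extra, unreferenced vertex; the orphan-vertex mesh is built only for illustration, and `is_valid` warns and returns `False` rather than raising:

```python
import numpy as np
from skfem import MeshTet

m = MeshTet()        # default unit-cube mesh
assert m.is_valid()  # no duplicate and no orphan vertices

# append a vertex that no element references; the second check
# should now warn about an orphan vertex and return False
p_bad = np.hstack((m.p, np.array([[2.], [2.], [2.]])))
assert not MeshTet(p_bad, m.t).is_valid()
```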

    def __add__(self, other):
        """Join two meshes."""
        cls = type(self)
        if not isinstance(other, cls):
            raise TypeError("Can only join meshes with same type.")
        p = np.hstack((self.p, other.p))
        p = np.hstack((self.p.round(decimals=8),
                       other.p.round(decimals=8)))
        t = np.hstack((self.t, other.t + self.p.shape[1]))
        tmp = np.ascontiguousarray(p.T)
        tmp, ixa, ixb = np.unique(tmp.view([('', tmp.dtype)] * tmp.shape[1]),
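
The only functional change to `__add__` is the rounding to eight decimals before deduplication, so vertices of the two meshes that differ only by floating-point noise are merged into one. A small sketch of the effect; the `MeshTri.translated` call and the 1e-12 offset are illustrative assumptions, not part of this diff:

```python
import numpy as np
from skfem import MeshTri

# two unit squares meeting at x = 1; the shared vertices of the second
# square are offset by noise far below the 1e-8 rounding threshold
m1 = MeshTri()
m2 = MeshTri().translated((1.0 + 1e-12, 0.0))
m = m1 + m2

# the two vertices on the shared edge are merged instead of duplicated
assert m.p.shape[1] == m1.p.shape[1] + m2.p.shape[1] - 2
```
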
157 changes: 157 additions & 0 deletions skfem/mesh/mesh_tet_1.py
@@ -117,6 +117,163 @@ def _uniform(self):
                _subdomains=None,
            )

    @staticmethod
    def _adaptive_sort_mesh(p, t, marked):
        """Make (0, 1) the longest edge in t for marked."""

        # add noise so that there are no edges with the same length
        np.random.seed(1337)
        p = p.copy() + 1e-5 * np.random.random(p.shape)

        l01 = np.sqrt(np.sum((p[:, t[0, marked]] - p[:, t[1, marked]]) ** 2,
                             axis=0))
        l12 = np.sqrt(np.sum((p[:, t[1, marked]] - p[:, t[2, marked]]) ** 2,
                             axis=0))
        l02 = np.sqrt(np.sum((p[:, t[0, marked]] - p[:, t[2, marked]]) ** 2,
                             axis=0))
        l03 = np.sqrt(np.sum((p[:, t[0, marked]] - p[:, t[3, marked]]) ** 2,
                             axis=0))
        l13 = np.sqrt(np.sum((p[:, t[1, marked]] - p[:, t[3, marked]]) ** 2,
                             axis=0))
        l23 = np.sqrt(np.sum((p[:, t[2, marked]] - p[:, t[3, marked]]) ** 2,
                             axis=0))

        # indices where (1, 2) is the longest etc.
        ix12 = ((l12 > l01)
                * (l12 > l02)
                * (l12 > l03)
                * (l12 > l13)
                * (l12 > l23))
        ix02 = ((l02 > l01)
                * (l02 > l12)
                * (l02 > l03)
                * (l02 > l13)
                * (l02 > l23))
        ix03 = ((l03 > l01)
                * (l03 > l12)
                * (l03 > l02)
                * (l03 > l13)
                * (l03 > l23))
        ix13 = ((l13 > l01)
                * (l13 > l12)
                * (l13 > l02)
                * (l13 > l03)
                * (l13 > l23))
        ix23 = ((l23 > l01)
                * (l23 > l12)
                * (l23 > l02)
                * (l23 > l03)
                * (l23 > l13))

        # flip edges
        T = t.copy()
        T[:, marked[ix02]] = t[:, marked[ix02]][[2, 0, 1, 3]]
        T[:, marked[ix03]] = t[:, marked[ix03]][[0, 3, 1, 2]]
        T[:, marked[ix12]] = t[:, marked[ix12]][[1, 2, 0, 3]]
        T[:, marked[ix13]] = t[:, marked[ix13]][[1, 3, 2, 0]]
        T[:, marked[ix23]] = t[:, marked[ix23]][[3, 2, 1, 0]]

        return T
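
The helper only relabels the local vertex numbering of the marked elements so that the (noise-perturbed) longest edge becomes the local edge (0, 1); the mesh geometry is untouched. A quick sketch that pokes at the private static method directly, purely for illustration; the 1e-4 slack absorbs the tie-breaking noise:

```python
import numpy as np
from skfem import MeshTet

m = MeshTet()
marked = np.arange(m.t.shape[1])
ts = MeshTet._adaptive_sort_mesh(m.p, m.t, marked)

def edge_len(t, i, j):
    # length of the local edge (i, j) in every element
    return np.sqrt(np.sum((m.p[:, t[i]] - m.p[:, t[j]]) ** 2, axis=0))

# after sorting, no local edge is longer than (0, 1)
l01 = edge_len(ts, 0, 1)
for i, j in [(0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]:
    assert np.all(l01 + 1e-4 >= edge_len(ts, i, j))
```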

    def _find_nz(self, rows, cols, shape, transform=None):
        """Find nonzero entries from the incidence matrix after transform."""
        from scipy.sparse import coo_matrix, find
        rows = rows.flatten('C')
        cols = cols.flatten('C')
        inc = coo_matrix((np.ones(len(rows)), (rows, cols)),
                         shape=shape).tocsr()
        if transform is not None:
            inc = transform(inc)
        inc.eliminate_zeros()
        return find(inc)[:2]
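
In the refinement loop below, `_find_nz` is called with the transform `lambda I: I[t0].multiply(I[t1])` to detect which marked elements already have a midpoint stored for their refinement edge: multiplying the two endpoint rows of the vertex-to-midpoint incidence matrix leaves a nonzero only in the column of that midpoint. A standalone sketch of the same sparse-matrix trick using plain scipy, with made-up indices:

```python
import numpy as np
from scipy.sparse import coo_matrix, find

# vertex-to-midpoint incidence: edge (0, 1) has been split at vertex 4
rows = np.array([0, 1])   # endpoints of the split edge
cols = np.array([4, 4])   # index of the stored midpoint
inc = coo_matrix((np.ones(2), (rows, cols)), shape=(5, 5)).tocsr()

# an element whose refinement edge is (0, 1) "sees" the existing midpoint
hit = inc[np.array([0])].multiply(inc[np.array([1])])
print(find(hit)[:2])  # row 0, column 4 -> reuse midpoint 4
```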

    def _adaptive(self, marked):
        """Longest edge bisection."""
        if isinstance(marked, list):
            marked = np.array(marked, dtype=np.int64)
        nt = self.t.shape[1]
        nv = self.p.shape[1]
        p = np.zeros((3, 9 * nv), dtype=np.float64)
        t = np.zeros((4, 4 * nt), dtype=np.int64)
        p[:, :self.p.shape[1]] = self.p.copy()
        t[:, :self.t.shape[1]] = self.t.copy()

        gen = np.zeros(nv + 6 * nt, dtype=np.int8)
        nonconf = np.ones(8 * nv, dtype=np.int8)
        split_edge = np.zeros((3, 8 * nv), dtype=np.int64)
        ns = 0

        while len(marked) > 0:
            nm = len(marked)
            tnew = np.zeros(nm, dtype=np.int64)
            ix = np.arange(nm, dtype=np.int64)
            t = self._adaptive_sort_mesh(p, t, marked)
            t0, t1, t2, t3 = t[:, marked]

            if ns > 0:
                nonconf_edge = np.nonzero(nonconf[:ns])[0]
                i, j = self._find_nz(
                    split_edge[:2, nonconf_edge],
                    np.vstack((split_edge[2, nonconf_edge],) * 2),
                    (nv, nv),
                    lambda I: I[t0].multiply(I[t1])
                )
                tnew[i] = j
                ix = np.nonzero(tnew == 0)[0]

            if len(ix) > 0:
                i, j = self._find_nz(
                    *np.sort(np.vstack((t0[ix], t1[ix])), axis=0),
                    (nv, nv),
                )
                nn = len(i)
                nix = slice(ns, ns + nn)
                split_edge[0, nix] = i
                split_edge[1, nix] = j
                split_edge[2, nix] = np.arange(nv, nv + nn, dtype=np.int64)

                # add new points
                p[:, nv:(nv + nn)] = .5 * (p[:, i] + p[:, j])
                nv += nn
                i, j = self._find_nz(
                    split_edge[:2, nix],
                    np.vstack((split_edge[2, nix],) * 2),
                    (nv, nv),
                    lambda I: I[t0].multiply(I[t1])
                )
                tnew[i] = j
                ns += nn

            ix = np.nonzero(gen[tnew] == 0)[0]
            gen[tnew[ix]] = np.max(gen[t[:, marked[ix]]], axis=0) + 1

            # add new elements
            t[:, marked] = np.vstack((t3, t0, t2, tnew))
            t[:, nt:(nt + nm)] = np.vstack((t2, t1, t3, tnew))
            nt += nm

            check = np.nonzero(nonconf[:ns])[0]
            nonconf[check] = 0
            check_node = np.zeros(nv, dtype=np.int64)
            check_node[split_edge[:2, check]] = 1
            check_elem = np.nonzero(check_node[t[:, :nt]].sum(axis=0))[0]

            i, j = self._find_nz(
                t[:, check_elem],
                np.vstack((check_elem,) * 4),
                (nv, nt),
                lambda I: (I[split_edge[0, check]]
                           .multiply(I[split_edge[1, check]]))
            )
            nonconf[check[i]] = 1
            marked = np.unique(j)

        return replace(
            self,
            doflocs=p[:, :nv],
            t=t[:, :nt],
        )
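
`_adaptive` is reached through `Mesh.refined` whenever a list or array of element indices is passed, as the tests below do. A minimal usage sketch; the marking rule, elements that touch the origin, is only an example:

```python
import numpy as np
from skfem import MeshTet

m = MeshTet()
for _ in range(5):
    # mark every element that has the origin as one of its vertices
    at_origin = np.all(np.abs(m.p) < 1e-10, axis=0)
    marked = np.nonzero(at_origin[m.t].any(axis=0))[0]
    m = m.refined(marked)  # index array -> longest edge bisection

assert m.is_valid()
print(m.t.shape[1], "elements after local refinement")
```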

    @classmethod
    def init_tensor(cls: Type,
                    x: ndarray,
76 changes: 76 additions & 0 deletions tests/test_mesh.py
@@ -9,6 +9,9 @@
from skfem.mesh import (Mesh, MeshHex, MeshLine, MeshQuad, MeshTet, MeshTri,
                        MeshTri2, MeshQuad2, MeshTet2, MeshHex2, MeshLine1DG,
                        MeshQuad1DG, MeshHex2, MeshTri1DG)
from skfem.assembly import Basis, LinearForm
from skfem.element import ElementTetP1
from skfem.utils import projection
from skfem.io.meshio import to_meshio, from_meshio
from skfem.io.json import to_dict, from_dict

@@ -202,6 +205,79 @@ def runTest(self):
            self.assertEqual(prev_p_size, m.p.shape[1] - 3)


class TestAdaptiveSplitting3D(TestCase):

    def runTest(self):

        m = MeshTet()
        for itr in range(10):
            m = m.refined([itr, itr + 1, itr + 2])
            assert m.is_valid()

        m = MeshTet()
        for itr in range(10):
            m = m.refined([itr, itr + 1])
            assert m.is_valid()

        m = MeshTet()
        for itr in range(50):
            m = m.refined([itr])
            assert m.is_valid()

        m = MeshTet()
        for itr in range(5):
            m = m.refined(np.arange(m.nelements, dtype=np.int64))
            assert m.is_valid()

        # adaptively refine one face of a cube, check that the mesh parameter h
        # is approximately linear w.r.t. distance from the face
        m = MeshTet.init_tensor(np.linspace(0, 1, 3),
                                np.linspace(0, 1, 3),
                                np.linspace(0, 1, 3))

        for itr in range(15):
            m = m.refined(m.f2t[0, m.facets_satisfying(lambda x: x[0] == 0)])

        @LinearForm
        def hproj(v, w):
            return w.h * v

        basis = Basis(m, ElementTetP1())
        h = projection(hproj, basis)

        funh = basis.interpolator(h)

        xs = np.vstack((
            np.linspace(0, .5, 20),
            np.zeros(20) + .5,
            np.zeros(20) + .5,
        ))
        hs = funh(xs)

        assert np.max(np.abs(hs - xs[0])) < 0.063

        # check that the same mesh is reproduced by any future versions
        m = MeshTet.init_tensor(np.linspace(0, 1, 2),
                                np.linspace(0, 1, 2),
                                np.linspace(0, 1, 2))

        m = m.refined(m.f2t[0, m.facets_satisfying(lambda x: x[0] == 0)])

        assert_array_equal(
            m.p,
            np.array([[0. , 0. , 1. , 1. , 0. , 0. , 1. , 1. , 0.5],
                      [0. , 1. , 0. , 1. , 0. , 1. , 0. , 1. , 0.5],
                      [0. , 0. , 0. , 0. , 1. , 1. , 1. , 1. , 0.5]])
        )

        assert_array_equal(
            m.t,
            np.array([[5, 3, 3, 5, 6, 6, 1, 4, 1, 2, 2, 4],
                      [0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7],
                      [1, 1, 2, 4, 2, 4, 5, 5, 3, 3, 6, 6],
                      [8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8]])
        )


class TestMirrored(TestCase):

    def runTest(self):
