Skip to content

Commit

Permalink
Merge branch 'main' into origin/benedicto/followUpMeshStats
Browse files Browse the repository at this point in the history
  • Loading branch information
alexbenedicto authored Dec 18, 2024
2 parents 4b200a7 + 1e91901 commit ce8f650
Show file tree
Hide file tree
Showing 10 changed files with 623 additions and 345 deletions.
3 changes: 2 additions & 1 deletion docs/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
sphinx >= 7.4.7
sphinx_rtd_theme
sphinx-argparse
sphinx-argparse >= 0.5.2
sphinx-design
# Running CLI programs and capture outputs
sphinxcontrib-programoutput>=0.17
Expand Down
6 changes: 4 additions & 2 deletions geos-ats/src/geos/ats/helpers/curve_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,16 +33,18 @@ def interpolate_values_time( ta, xa, tb ):
"""
N = list( np.shape( xa ) )
M = len( tb )
ta = np.ascontiguousarray( np.squeeze( ta ) )
tb = np.ascontiguousarray( np.squeeze( tb ) )

if ( len( N ) == 1 ):
return interp1d( ta, xa )( tb )
else:
# Reshape the input array so that we can work on the non-time axes
S = np.product( N[ 1: ] )
S = np.prod( N[ 1: ] )
xc = np.reshape( xa, ( N[ 0 ], S ) )
xd = np.zeros( ( len( tb ), S ) )
for ii in range( S ):
xd[ :, ii ] = interp1d( ta, xc[ :, ii ] )( tb )
xd[ :, ii ] = interp1d( ta, xc[ :, ii ], bounds_error=False, fill_value='extrapolate' )( tb )

# Return the array to its expected shape
N[ 0 ] = M
Expand Down
108 changes: 87 additions & 21 deletions geos-ats/src/geos/ats/helpers/restart_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import argparse
import logging
import time
import string
from pathlib import Path
try:
from geos.ats.helpers.permute_array import permuteArray # type: ignore[import]
Expand Down Expand Up @@ -375,35 +376,100 @@ def compareIntArrays( self, path, arr, base_arr ):
ARR [in]: The hdf5 Dataset to compare.
BASE_ARR [in]: The hdf5 Dataset to compare against.
"""
# If the shapes are different they can't be compared.
message = ""
if arr.shape != base_arr.shape:
msg = "Datasets have different shapes and therefore can't be compared: %s, %s.\n" % ( arr.shape,
base_arr.shape )
self.errorMsg( path, msg, True )
return
message = "Datasets have different shapes and therefore can't be compared statistically: %s, %s.\n" % (
arr.shape, base_arr.shape )
else:
# Calculate the absolute difference.
difference = np.subtract( arr, base_arr )
np.abs( difference, out=difference )

# Create a copy of the arrays.
offenders = difference != 0.0
n_offenders = np.sum( offenders )

# Calculate the absolute difference.
difference = np.subtract( arr, base_arr )
np.abs( difference, out=difference )
if n_offenders != 0:
max_index = np.unravel_index( np.argmax( difference ), difference.shape )
max_difference = difference[ max_index ]
offenders_mean = np.mean( difference[ offenders ] )
offenders_std = np.std( difference[ offenders ] )

offenders = difference != 0.0
n_offenders = np.sum( offenders )
message = "Arrays of types %s and %s have %s values of which %d have differing values.\n" % (
arr.dtype, base_arr.dtype, offenders.size, n_offenders )
message += "Statistics of the differences greater than 0:\n"
message += "\tmax_index = %s, max = %s, mean = %s, std = %s\n" % ( max_index, max_difference,
offenders_mean, offenders_std )

if n_offenders != 0:
max_index = np.unravel_index( np.argmax( difference ), difference.shape )
max_difference = difference[ max_index ]
offenders_mean = np.mean( difference[ offenders ] )
offenders_std = np.std( difference[ offenders ] )
# actually, int8 arrays are almost always char arrays, so we should add a character comparison.
if arr.dtype == np.int8 and base_arr.dtype == np.int8:
message += self.compareCharArrays( arr, base_arr )

message = "Arrays of types %s and %s have %s values of which %d have differing values.\n" % (
arr.dtype, base_arr.dtype, offenders.size, n_offenders )
message += "Statistics of the differences greater than 0:\n"
message += "\tmax_index = %s, max = %s, mean = %s, std = %s\n" % ( max_index, max_difference,
offenders_mean, offenders_std )
if message != "":
self.errorMsg( path, message, True )

def compareCharArrays( self, comp_arr, base_arr ):
    """
    Compare the valid characters of two arrays and return a formatted string showing differences.
    COMP_ARR [in]: The hdf5 Dataset to compare.
    BASE_ARR [in]: The hdf5 Dataset to compare against.
    Returns a formatted string highlighting the differing characters
    (an empty string when no difference is found).
    """
    comp_ndarr = np.array( comp_arr ).flatten()
    base_ndarr = np.array( base_arr ).flatten()

    # Replace invalid characters by group-separator characters ('\x1D').
    # Negative int8 values cannot be passed to chr(), so they count as invalid too.
    valid_chars = set( string.printable )
    invalid_char = '\x1D'
    comp_str = "".join(
        [ chr( x ) if ( x >= 0 and chr( x ) in valid_chars ) else invalid_char for x in comp_ndarr ] )
    base_str = "".join(
        [ chr( x ) if ( x >= 0 and chr( x ) in valid_chars ) else invalid_char for x in base_ndarr ] )

    # Collapse whitespace sequences into a single space
    # (prevents indentation / spacing changes from being reported as differences).
    whitespace_pattern = r"[ \t\n\r\v\f]+"
    comp_str = re.sub( whitespace_pattern, " ", comp_str )
    base_str = re.sub( whitespace_pattern, " ", base_str )
    # Collapse invalid-character sequences into a space for a readable display.
    invalid_char_pattern = r"\x1D+"
    comp_str_display = re.sub( invalid_char_pattern, " ", comp_str )
    base_str_display = re.sub( invalid_char_pattern, " ", base_str )

    message = ""

    def limited_display( n, text ):
        # Truncate long strings for display. Parameter is named 'text' (not 'string')
        # to avoid shadowing the 'string' module used above.
        return text[ :n ] + f"... ({len(text)-n} omitted chars)" if len( text ) > n else text

    if len( comp_str ) != len( base_str ):
        max_display = 250
        message = f"Character arrays have different sizes: {len( comp_str )}, {len( base_str )}.\n"
        message += f" {limited_display( max_display, comp_str_display )}\n"
        message += f" {limited_display( max_display, base_str_display )}\n"
    else:
        # We need to trim arrays to the length of the shortest one for the comparisons
        min_length = min( len( comp_str_display ), len( base_str_display ) )
        comp_str_trim = comp_str_display[ :min_length ]
        base_str_trim = base_str_display[ :min_length ]

        differing_indices = np.where( np.array( list( comp_str_trim ) ) != np.array( list( base_str_trim ) ) )[ 0 ]
        if differing_indices.size != 0:
            # check for reordering: same set of substrings appearing in a different order
            arr_set = sorted( set( comp_str.split( invalid_char ) ) )
            base_arr_set = sorted( set( base_str.split( invalid_char ) ) )
            reordering_detected = arr_set == base_arr_set

            max_display = 110 if reordering_detected else 250
            message = "Differing valid characters"
            message += " (substrings reordering detected):\n" if reordering_detected else ":\n"

            message += f" {limited_display( max_display, comp_str_display )}\n"
            message += f" {limited_display( max_display, base_str_display )}\n"
            # Build the caret marker line; use a set for O(1) membership tests instead of
            # scanning the ndarray once per character.
            diff_set = set( differing_indices.tolist() )
            message += " " + "".join(
                [ "^" if i in diff_set else " " for i in range( min( max_display, min_length ) ) ] ) + "\n"

    return message

def compareStringArrays( self, path, arr, base_arr ):
"""
Compare two string datasets. Exact equality is used as the acceptance criteria.
Expand Down
13 changes: 9 additions & 4 deletions geos-ats/src/geos/ats/test_case.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import ats # type: ignore[import]
import ats # type: ignore[import]
import os
import shutil
import logging
Expand Down Expand Up @@ -319,7 +319,7 @@ def testCreate( self ):
priority = 1

# Setup a new test group
atsTest = None
parent_test = None
ats.tests.AtsTest.newGroup( priority=priority, path=self.path )
for stepnum, step in enumerate( self.steps ):
np = getattr( step.p, "np", 1 )
Expand All @@ -329,10 +329,10 @@ def testCreate( self ):
label = "%s_%d_%s" % ( self.name, stepnum + 1, step.label() )

# call either 'test' or 'testif'
if atsTest is None:
if parent_test is None:
func = lambda *a, **k: test( *a, **k )
else:
func = lambda *a, **k: testif( atsTest, *a, **k )
func = lambda *a, **k: testif( parent_test, *a, **k )

# Set the time limit
kw = {}
Expand All @@ -359,6 +359,11 @@ def testCreate( self ):
if self.last_status == PASSED:
atsTest.status = SKIPPED

# Set the parent test
# Note: do not make tests dependent on the result of curvecheck
if step.label() != 'curvecheck':
parent_test = atsTest

# End the group
ats.tests.AtsTest.endGroup()

Expand Down
3 changes: 1 addition & 2 deletions geos-mesh/src/geos/mesh/conversion/main.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import argparse
import logging
import sys
from geos.mesh.conversion import abaqus_converter


def build_abaqus_converter_input_parser() -> argparse.ArgumentParser:
Expand All @@ -25,8 +26,6 @@ def main() -> None:
output (str): Output mesh file name
-v/--verbose (flag): Increase verbosity level
"""
from geosx_mesh_tools import abaqus_converter

# Parse the user arguments
parser = build_abaqus_converter_input_parser()
args = parser.parse_args()
Expand Down
1 change: 1 addition & 0 deletions geos-mesh/src/geos/mesh/doctor/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Empty
53 changes: 18 additions & 35 deletions geos-mesh/src/geos/mesh/doctor/checks/check_fractures.py
Original file line number Diff line number Diff line change
@@ -1,30 +1,14 @@
from dataclasses import dataclass
import logging

from typing import (
Collection,
FrozenSet,
Iterable,
Sequence,
Set,
Tuple,
)

from tqdm import tqdm
import numpy

from vtkmodules.vtkCommonDataModel import (
vtkUnstructuredGrid,
vtkCell,
)
from vtkmodules.vtkCommonCore import (
vtkPoints, )
from vtkmodules.vtkIOXML import (
vtkXMLMultiBlockDataReader, )
from vtkmodules.util.numpy_support import (
vtk_to_numpy, )
from vtk_utils import (
vtk_iter, )
from dataclasses import dataclass
from typing import Collection, Iterable, Sequence
from tqdm import tqdm
from vtkmodules.vtkCommonDataModel import vtkUnstructuredGrid, vtkCell
from vtkmodules.vtkCommonCore import vtkPoints
from vtkmodules.vtkIOXML import vtkXMLMultiBlockDataReader
from vtkmodules.util.numpy_support import vtk_to_numpy
from geos.mesh.doctor.checks.vtk_utils import vtk_iter
from geos.mesh.doctor.checks.generate_fractures import Coordinates3D


@dataclass( frozen=True )
Expand All @@ -44,7 +28,7 @@ class Result:


def __read_multiblock( vtk_input_file: str, matrix_name: str,
fracture_name: str ) -> Tuple[ vtkUnstructuredGrid, vtkUnstructuredGrid ]:
fracture_name: str ) -> tuple[ vtkUnstructuredGrid, vtkUnstructuredGrid ]:
reader = vtkXMLMultiBlockDataReader()
reader.SetFileName( vtk_input_file )
reader.Update()
Expand Down Expand Up @@ -73,9 +57,9 @@ def format_collocated_nodes( fracture_mesh: vtkUnstructuredGrid ) -> Sequence[ I


def __check_collocated_nodes_positions(
matrix_points: Sequence[ Tuple[ float, float, float ] ], fracture_points: Sequence[ Tuple[ float, float, float ] ],
g2l: Sequence[ int ], collocated_nodes: Iterable[ Iterable[ int ] ]
) -> Collection[ Tuple[ int, Iterable[ int ], Iterable[ Tuple[ float, float, float ] ] ] ]:
matrix_points: Sequence[ Coordinates3D ], fracture_points: Sequence[ Coordinates3D ], g2l: Sequence[ int ],
collocated_nodes: Iterable[ Iterable[ int ] ]
) -> Collection[ tuple[ int, Iterable[ int ], Iterable[ Coordinates3D ] ] ]:
issues = []
for li, bucket in enumerate( collocated_nodes ):
matrix_nodes = ( fracture_points[ li ], ) + tuple( map( lambda gi: matrix_points[ g2l[ gi ] ], bucket ) )
Expand All @@ -98,14 +82,14 @@ def my_iter( ccc ):

def __check_neighbors( matrix: vtkUnstructuredGrid, fracture: vtkUnstructuredGrid, g2l: Sequence[ int ],
collocated_nodes: Sequence[ Iterable[ int ] ] ):
fracture_nodes: Set[ int ] = set()
fracture_nodes: set[ int ] = set()
for bucket in collocated_nodes:
for gi in bucket:
fracture_nodes.add( g2l[ gi ] )
# For each face of each cell,
# if all the points of the face are "made" of collocated nodes,
# then this is a fracture face.
fracture_faces: Set[ FrozenSet[ int ] ] = set()
fracture_faces: set[ frozenset[ int ] ] = set()
for c in range( matrix.GetNumberOfCells() ):
cell: vtkCell = matrix.GetCell( c )
for f in range( cell.GetNumberOfFaces() ):
Expand All @@ -116,7 +100,7 @@ def __check_neighbors( matrix: vtkUnstructuredGrid, fracture: vtkUnstructuredGri
# Finding the cells
for c in tqdm( range( fracture.GetNumberOfCells() ), desc="Finding neighbor cell pairs" ):
cell: vtkCell = fracture.GetCell( c )
cns: Set[ FrozenSet[ int ] ] = set() # subset of collocated_nodes
cns: set[ frozenset[ int ] ] = set() # subset of collocated_nodes
point_ids = frozenset( vtk_iter( cell.GetPointIds() ) )
for point_id in point_ids:
bucket = collocated_nodes[ point_id ]
Expand All @@ -129,9 +113,8 @@ def __check_neighbors( matrix: vtkUnstructuredGrid, fracture: vtkUnstructuredGri
if f in fracture_faces:
found += 1
if found != 2:
logging.warning(
f"Something went wrong since we should have found 2 fractures faces (we found {found}) for collocated nodes {cns}."
)
logging.warning( f"Something went wrong since we should have found 2 fractures faces (we found {found})" +
f" for collocated nodes {cns}." )


def __check( vtk_input_file: str, options: Options ) -> Result:
Expand Down
Loading

0 comments on commit ce8f650

Please sign in to comment.