From b3de25ea657dff5e6d0ed36e202f60fdf41827c2 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 18 Mar 2022 16:05:50 -0500 Subject: [PATCH 01/30] use partition-ID-enabled meshmode --- grudge/discretization.py | 289 +++++++++++++++++++-------------------- grudge/dof_desc.py | 22 +-- grudge/trace_pair.py | 185 +++++++++++++++---------- test/test_grudge.py | 23 ++-- 4 files changed, 262 insertions(+), 257 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index 63b8564f3..4cc7fe639 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -8,8 +8,6 @@ .. currentmodule:: grudge.discretization -.. autofunction:: relabel_partitions - Internal things that are visble due to type annotations ------------------------------------------------------- @@ -41,13 +39,14 @@ THE SOFTWARE. """ -from typing import Mapping, Optional, Union, Tuple, TYPE_CHECKING, Any +from typing import Mapping, Optional, Union, Tuple, Callable, TYPE_CHECKING, Any from pytools import memoize_method, single_valued +from dataclasses import dataclass + from grudge.dof_desc import ( VTAG_ALL, - BTAG_MULTIVOL_PARTITION, DD_VOLUME_ALL, DISCR_TAG_BASE, DISCR_TAG_MODAL, @@ -70,8 +69,7 @@ make_face_restriction, DiscretizationConnection ) -from meshmode.mesh import ( - InterPartitionAdjacencyGroup, Mesh, BTAG_PARTITION, BoundaryTag) +from meshmode.mesh import Mesh, BTAG_PARTITION, PartitionID from meshmode.dof_array import DOFArray from warnings import warn @@ -127,6 +125,27 @@ def _normalize_discr_tag_to_group_factory( # }}} +# {{{ _PartitionIDHelper + +@dataclass(frozen=True, init=True, eq=True) +class _PartitionIDHelper: + """ + Provides a unified interface for creating and inspecting partition identifiers + that does not depend on whether the problem is distributed, multi-volume, etc. + + .. attribute:: make + .. attribute:: get_mpi_rank + .. attribute:: get_volume + + .. automethod:: __init__ + """ + make: Callable[[Optional[int], VolumeTag], PartitionID] + get_mpi_rank: Callable[[PartitionID], Optional[int]] + get_volume: Callable[[PartitionID], VolumeTag] + +# }}} + + class DiscretizationCollection: """A collection of discretizations, defined on the same underlying :class:`~meshmode.mesh.Mesh`, corresponding to various mesh entities @@ -161,7 +180,9 @@ def __init__(self, array_context: ArrayContext, Mapping[DiscretizationTag, ElementGroupFactory]] = None, mpi_communicator: Optional["mpi4py.MPI.Intracomm"] = None, inter_partition_connections: Optional[ - Mapping[BoundaryDomainTag, DiscretizationConnection]] = None + Mapping[Tuple[PartitionID, PartitionID], + DiscretizationConnection]] = None, + part_id_helper: Optional[_PartitionIDHelper] = None, ) -> None: """ :arg discr_tag_to_group_factory: A mapping from discretization tags @@ -213,9 +234,9 @@ def __init__(self, array_context: ArrayContext, from meshmode.discretization import Discretization - # {{{ deprecated backward compatibility yuck - if isinstance(volume_discrs, Mesh): + # {{{ deprecated backward compatibility yuck + warn("Calling the DiscretizationCollection constructor directly " "is deprecated, call make_discretization_collection " "instead. 
This will stop working in 2023.", @@ -239,6 +260,21 @@ def __init__(self, array_context: ArrayContext, raise TypeError("may not pass inter_partition_connections when " "DiscretizationCollection constructor is called in " "legacy mode") + if part_id_helper is not None: + raise TypeError("may not pass part_id_helper when " + "DiscretizationCollection constructor is called in " + "legacy mode") + + if mpi_communicator is not None: + part_id_helper = _PartitionIDHelper( + make=lambda rank, vtag: rank, + get_mpi_rank=lambda part_id: part_id, + get_volume=lambda part_id: VTAG_ALL) + else: + part_id_helper = _PartitionIDHelper( + make=lambda rank, vtag: None, + get_mpi_rank=lambda part_id: None, + get_volume=lambda part_id: VTAG_ALL) self._inter_partition_connections = \ _set_up_inter_partition_connections( @@ -246,19 +282,26 @@ def __init__(self, array_context: ArrayContext, mpi_communicator=mpi_communicator, volume_discrs=volume_discrs, base_group_factory=( - discr_tag_to_group_factory[DISCR_TAG_BASE])) + discr_tag_to_group_factory[DISCR_TAG_BASE]), + part_id_helper=part_id_helper) + self._part_id_helper = part_id_helper + + # }}} else: + assert discr_tag_to_group_factory is not None + self._discr_tag_to_group_factory = discr_tag_to_group_factory + if inter_partition_connections is None: raise TypeError("inter_partition_connections must be passed when " "DiscretizationCollection constructor is called in " "'modern' mode") + if part_id_helper is None: + raise TypeError("part_id_helper must be passed when " + "DiscretizationCollection constructor is called in " + "'modern' mode") self._inter_partition_connections = inter_partition_connections - - assert discr_tag_to_group_factory is not None - self._discr_tag_to_group_factory = discr_tag_to_group_factory - - # }}} + self._part_id_helper = part_id_helper self._volume_discrs = volume_discrs @@ -741,104 +784,68 @@ def normal(self, dd): # {{{ distributed/multi-volume setup -def _check_btag(tag: BoundaryTag) -> Union[BTAG_MULTIVOL_PARTITION, BTAG_PARTITION]: - if isinstance(tag, BTAG_MULTIVOL_PARTITION): - return tag - - elif isinstance(tag, BTAG_PARTITION): - return tag - - else: - raise TypeError("unexpected type of inter-partition boundary tag " - f"'{type(tag)}'") - - -def _remote_rank_from_btag(btag: BoundaryTag) -> Optional[int]: - if isinstance(btag, BTAG_PARTITION): - return btag.part_nr - - elif isinstance(btag, BTAG_MULTIVOL_PARTITION): - return btag.other_rank - - else: - raise TypeError("unexpected type of inter-partition boundary tag " - f"'{type(btag)}'") - - -def _flip_dtag( - self_rank: Optional[int], - domain_tag: BoundaryDomainTag, - ) -> BoundaryDomainTag: - if isinstance(domain_tag.tag, BTAG_PARTITION): - assert self_rank is not None - return BoundaryDomainTag( - BTAG_PARTITION(self_rank), domain_tag.volume_tag) - - elif isinstance(domain_tag.tag, BTAG_MULTIVOL_PARTITION): - return BoundaryDomainTag( - BTAG_MULTIVOL_PARTITION( - other_rank=None if domain_tag.tag.other_rank is None else self_rank, - other_volume_tag=domain_tag.volume_tag), - domain_tag.tag.other_volume_tag) - - else: - raise TypeError("unexpected type of inter-partition boundary tag " - f"'{type(domain_tag.tag)}'") - - def _set_up_inter_partition_connections( array_context: ArrayContext, mpi_communicator: Optional["mpi4py.MPI.Intracomm"], volume_discrs: Mapping[VolumeTag, Discretization], - base_group_factory: ElementGroupFactory, + base_group_factory: ElementGroupFactory, + part_id_helper: _PartitionIDHelper, ) -> Mapping[ - BoundaryDomainTag, + Tuple[PartitionID, 
PartitionID], DiscretizationConnection]: - from meshmode.distributed import (get_inter_partition_tags, + from meshmode.distributed import (get_connected_partitions, make_remote_group_infos, InterRankBoundaryInfo, MPIBoundaryCommSetupHelper) - inter_part_tags = { - BoundaryDomainTag(_check_btag(btag), discr_vol_tag) - for discr_vol_tag, volume_discr in volume_discrs.items() - for btag in get_inter_partition_tags(volume_discr.mesh)} + rank = mpi_communicator.Get_rank() if mpi_communicator is not None else None + + # Save boundary restrictions as they're created to avoid potentially creating + # them twice in the loop below + cached_part_bdry_restrictions: Mapping[ + Tuple[PartitionID, PartitionID], + DiscretizationConnection] = {} + + def get_part_bdry_restriction(self_part_id, other_part_id): + cached_result = cached_part_bdry_restrictions.get( + (self_part_id, other_part_id), None) + if cached_result is not None: + return cached_result + self_vtag = part_id_helper.get_volume(self_part_id) + return cached_part_bdry_restrictions.setdefault( + (self_part_id, other_part_id), + make_face_restriction( + array_context, volume_discrs[self_vtag], + base_group_factory, + boundary_tag=BTAG_PARTITION(other_part_id))) inter_part_conns: Mapping[ - BoundaryDomainTag, + Tuple[PartitionID, PartitionID], DiscretizationConnection] = {} - if inter_part_tags: - local_boundary_restrictions = { - domain_tag: make_face_restriction( - array_context, volume_discrs[domain_tag.volume_tag], - base_group_factory, boundary_tag=domain_tag.tag) - for domain_tag in inter_part_tags} - - irbis = [] - - for domain_tag in inter_part_tags: - assert isinstance( - domain_tag.tag, (BTAG_PARTITION, BTAG_MULTIVOL_PARTITION)) + irbis = [] - other_rank = _remote_rank_from_btag(domain_tag.tag) - btag_restr = local_boundary_restrictions[domain_tag] + for vtag, volume_discr in volume_discrs.items(): + part_id = part_id_helper.make(rank, vtag) + connected_part_ids = get_connected_partitions(volume_discr.mesh) + for connected_part_id in connected_part_ids: + bdry_restr = get_part_bdry_restriction(part_id, connected_part_id) - if other_rank is None: + if part_id_helper.get_mpi_rank(connected_part_id) == rank: # {{{ rank-local interface between multiple volumes - assert isinstance(domain_tag.tag, BTAG_MULTIVOL_PARTITION) + rev_bdry_restr = get_part_bdry_restriction( + connected_part_id, part_id) from meshmode.discretization.connection import \ make_partition_connection - remote_dtag = _flip_dtag(None, domain_tag) - inter_part_conns[domain_tag] = make_partition_connection( + inter_part_conns[connected_part_id, part_id] = \ + make_partition_connection( array_context, - local_bdry_conn=btag_restr, - remote_bdry_discr=( - local_boundary_restrictions[remote_dtag].to_discr), + local_bdry_conn=bdry_restr, + remote_bdry_discr=rev_bdry_restr.to_discr, remote_group_infos=make_remote_group_infos( - array_context, remote_dtag.tag, btag_restr)) + array_context, connected_part_id, rev_bdry_restr)) # }}} else: @@ -850,27 +857,26 @@ def _set_up_inter_partition_connections( irbis.append( InterRankBoundaryInfo( - local_btag=domain_tag.tag, - local_part_id=domain_tag, - remote_part_id=_flip_dtag( - mpi_communicator.rank, domain_tag), - remote_rank=other_rank, - local_boundary_connection=btag_restr)) + local_part_id=part_id, + remote_part_id=connected_part_id, + remote_rank=part_id_helper.get_mpi_rank( + connected_part_id), + local_boundary_connection=bdry_restr)) # }}} - if irbis: - assert mpi_communicator is not None + if irbis: + assert mpi_communicator is 
not None - with MPIBoundaryCommSetupHelper(mpi_communicator, array_context, - irbis, base_group_factory) as bdry_setup_helper: - while True: - conns = bdry_setup_helper.complete_some() - if not conns: - # We're done. - break + with MPIBoundaryCommSetupHelper(mpi_communicator, array_context, + irbis, base_group_factory) as bdry_setup_helper: + while True: + conns = bdry_setup_helper.complete_some() + if not conns: + # We're done. + break - inter_part_conns.update(conns) + inter_part_conns.update(conns) return inter_part_conns @@ -954,6 +960,7 @@ def make_discretization_collection( from pytools import single_valued, is_single_valued + assert len(volumes) > 0 assert is_single_valued(mesh_or_discr.ambient_dim for mesh_or_discr in volumes.values()) @@ -973,54 +980,44 @@ def make_discretization_collection( if isinstance(mesh_or_discr, Mesh) else mesh_or_discr) for vtag, mesh_or_discr in volumes.items()} + mpi_communicator = getattr(array_context, "mpi_communicator", None) + + if VTAG_ALL not in volumes.keys(): + # Multi-volume + if mpi_communicator is not None: + part_id_helper = _PartitionIDHelper( + make=lambda rank, vtag: (rank, vtag), + get_mpi_rank=lambda part_id: part_id[0], + get_volume=lambda part_id: part_id[1]) + else: + part_id_helper = _PartitionIDHelper( + make=lambda rank, vtag: vtag, + get_mpi_rank=lambda part_id: None, + get_volume=lambda part_id: part_id) + else: + # Single-volume + if mpi_communicator is not None: + part_id_helper = _PartitionIDHelper( + make=lambda rank, vtag: rank, + get_mpi_rank=lambda part_id: part_id, + get_volume=lambda part_id: VTAG_ALL) + else: + part_id_helper = _PartitionIDHelper( + make=lambda rank, vtag: None, + get_mpi_rank=lambda part_id: None, + get_volume=lambda part_id: VTAG_ALL) + return DiscretizationCollection( array_context=array_context, volume_discrs=volume_discrs, discr_tag_to_group_factory=discr_tag_to_group_factory, inter_partition_connections=_set_up_inter_partition_connections( array_context=array_context, - mpi_communicator=getattr( - array_context, "mpi_communicator", None), + mpi_communicator=mpi_communicator, volume_discrs=volume_discrs, base_group_factory=discr_tag_to_group_factory[DISCR_TAG_BASE], - )) - -# }}} - - -# {{{ relabel_partitions - -def relabel_partitions(mesh: Mesh, - self_rank: int, - part_nr_to_rank_and_vol_tag: Mapping[int, Tuple[int, VolumeTag]]) -> Mesh: - """Given a partitioned mesh (which includes :class:`meshmode.mesh.BTAG_PARTITION` - boundary tags), relabel those boundary tags into - :class:`grudge.dof_desc.BTAG_MULTIVOL_PARTITION` tags, which map each - of the incoming partitions onto a combination of rank and volume tag, - given by *part_nr_to_rank_and_vol_tag*. 
- """ - - def _new_btag(btag: BoundaryTag) -> BTAG_MULTIVOL_PARTITION: - if not isinstance(btag, BTAG_PARTITION): - raise TypeError("unexpected inter-partition boundary tags of type " - f"'{type(btag)}', expected BTAG_PARTITION") - - rank, vol_tag = part_nr_to_rank_and_vol_tag[btag.part_nr] - return BTAG_MULTIVOL_PARTITION( - other_rank=(None if rank == self_rank else rank), - other_volume_tag=vol_tag) - - assert mesh.facial_adjacency_groups is not None - - from dataclasses import replace - return mesh.copy(facial_adjacency_groups=[ - [ - replace(fagrp, - boundary_tag=_new_btag(fagrp.boundary_tag)) - if isinstance(fagrp, InterPartitionAdjacencyGroup) - else fagrp - for fagrp in grp_fagrp_list] - for grp_fagrp_list in mesh.facial_adjacency_groups]) + part_id_helper=part_id_helper), + part_id_helper=part_id_helper) # }}} diff --git a/grudge/dof_desc.py b/grudge/dof_desc.py index e495902e8..c4827dd24 100644 --- a/grudge/dof_desc.py +++ b/grudge/dof_desc.py @@ -8,8 +8,6 @@ :mod:`grudge`-specific boundary tags ------------------------------------ -.. autoclass:: BTAG_MULTIVOL_PARTITION - Domain tags ----------- @@ -100,24 +98,6 @@ class VTAG_ALL: # noqa: N801 # }}} -# {{{ partition identifier - -@dataclass(init=True, eq=True, frozen=True) -class BTAG_MULTIVOL_PARTITION: # noqa: N801 - """ - .. attribute:: other_rank - - An integer, or *None*. If *None*, this marks a partition boundary - to another volume on the same rank. - - .. attribute:: other_volume_tag - """ - other_rank: Optional[int] - other_volume_tag: "VolumeTag" - -# }}} - - # {{{ domain tag @dataclass(frozen=True, eq=True) @@ -350,7 +330,7 @@ def _normalize_domain_and_discr_tag( domain = BoundaryDomainTag(FACE_RESTR_ALL) elif domain in [FACE_RESTR_INTERIOR, "int_faces"]: domain = BoundaryDomainTag(FACE_RESTR_INTERIOR) - elif isinstance(domain, (BTAG_PARTITION, BTAG_MULTIVOL_PARTITION)): + elif isinstance(domain, BTAG_PARTITION): domain = BoundaryDomainTag(domain, _contextual_volume_tag) elif domain in [BTAG_ALL, BTAG_REALLY_ALL, BTAG_NONE]: domain = BoundaryDomainTag(domain, _contextual_volume_tag) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index 99d1cdafe..7d9f216bf 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -72,17 +72,17 @@ from pytools import memoize_on_first_arg from pytools.obj_array import obj_array_vectorize -from grudge.discretization import DiscretizationCollection, _remote_rank_from_btag +from grudge.discretization import DiscretizationCollection from grudge.projection import project -from meshmode.mesh import BTAG_PARTITION +from meshmode.mesh import BTAG_PARTITION, PartitionID import numpy as np import grudge.dof_desc as dof_desc from grudge.dof_desc import ( DOFDesc, DD_VOLUME_ALL, FACE_RESTR_INTERIOR, DISCR_TAG_BASE, - VolumeTag, VolumeDomainTag, BoundaryDomainTag, BTAG_MULTIVOL_PARTITION, + VolumeTag, VolumeDomainTag, BoundaryDomainTag, ConvertibleToDOFDesc, ) @@ -377,15 +377,16 @@ def local_inter_volume_trace_pairs( raise TypeError( f"expected a base-discretized other DOFDesc, got '{other_volume_dd}'") - self_btag = BTAG_MULTIVOL_PARTITION( - other_rank=None, - other_volume_tag=other_volume_dd.domain_tag.tag) - other_btag = BTAG_MULTIVOL_PARTITION( - other_rank=None, - other_volume_tag=self_volume_dd.domain_tag.tag) + rank = ( + dcoll.mpi_communicator.Get_rank() + if dcoll.mpi_communicator is not None + else None) + + self_part_id = dcoll._part_id_helper.make(rank, self_volume_dd.domain_tag.tag) + other_part_id = dcoll._part_id_helper.make(rank, other_volume_dd.domain_tag.tag) - 
self_trace_dd = self_volume_dd.trace(self_btag) - other_trace_dd = other_volume_dd.trace(other_btag) + self_trace_dd = self_volume_dd.trace(BTAG_PARTITION(other_part_id)) + other_trace_dd = other_volume_dd.trace(BTAG_PARTITION(self_part_id)) # FIXME: In all likelihood, these traces will be reevaluated from # the other side, which is hard to prevent given the interface we @@ -396,7 +397,7 @@ def local_inter_volume_trace_pairs( dcoll, other_volume_dd, other_trace_dd, other_ary) other_to_self = dcoll._inter_partition_connections[ - BoundaryDomainTag(self_btag, self_volume_dd.domain_tag.tag)] + other_part_id, self_part_id] def get_opposite_trace(el): if isinstance(el, Number): @@ -441,29 +442,19 @@ def update_for_type(self, key_hash, key: Type[Any]): @memoize_on_first_arg -def _remote_inter_partition_tags( +def _connected_partitions( dcoll: DiscretizationCollection, self_volume_tag: VolumeTag, - other_volume_tag: Optional[VolumeTag] = None - ) -> Sequence[BoundaryDomainTag]: - if other_volume_tag is None: - other_volume_tag = self_volume_tag - - result: List[BoundaryDomainTag] = [] - for domain_tag in dcoll._inter_partition_connections: - if isinstance(domain_tag.tag, BTAG_PARTITION): - if domain_tag.volume_tag == self_volume_tag: - result.append(domain_tag) - - elif isinstance(domain_tag.tag, BTAG_MULTIVOL_PARTITION): - if (domain_tag.tag.other_rank is not None - and domain_tag.volume_tag == self_volume_tag - and domain_tag.tag.other_volume_tag == other_volume_tag): - result.append(domain_tag) - - else: - raise AssertionError("unexpected inter-partition tag type encountered: " - f"'{domain_tag.tag}'") + other_volume_tag: VolumeTag + ) -> Sequence[PartitionID]: + result: List[PartitionID] = [ + connected_part_id + for connected_part_id, part_id in dcoll._inter_partition_connections.keys() + if ( + dcoll._part_id_helper.get_volume(part_id) == self_volume_tag + and ( + dcoll._part_id_helper.get_volume(connected_part_id) + == other_volume_tag))] return result @@ -505,21 +496,25 @@ class _RankBoundaryCommunicationEager: def __init__(self, actx: ArrayContext, dcoll: DiscretizationCollection, - domain_tag: BoundaryDomainTag, - *, local_bdry_data: ArrayOrContainerT, + *, + local_part_id: PartitionID, + remote_part_id: PartitionID, + local_bdry_data: ArrayOrContainerT, send_data: ArrayOrContainerT, comm_tag: Optional[Hashable] = None): comm = dcoll.mpi_communicator assert comm is not None - remote_rank = _remote_rank_from_btag(domain_tag.tag) + remote_rank = dcoll._part_id_helper.get_mpi_rank(remote_part_id) assert remote_rank is not None self.dcoll = dcoll self.array_context = actx - self.domain_tag = domain_tag - self.bdry_discr = dcoll.discr_from_dd(domain_tag) + self.local_part_id = local_part_id + self.remote_part_id = remote_part_id + self.bdry_discr = dcoll.discr_from_dd( + BoundaryDomainTag(BTAG_PARTITION(remote_part_id))) self.local_bdry_data = local_bdry_data self.comm_tag = self.base_comm_tag @@ -551,7 +546,8 @@ def finish(self): self.recv_data_np, self.array_context) unswapped_remote_bdry_data = unflatten(self.local_bdry_data, recv_data_flat, self.array_context) - bdry_conn = self.dcoll._inter_partition_connections[self.domain_tag] + bdry_conn = self.dcoll._inter_partition_connections[ + self.remote_part_id, self.local_part_id] remote_bdry_data = bdry_conn(unswapped_remote_bdry_data) # Complete the nonblocking send request associated with communicating @@ -559,7 +555,9 @@ def finish(self): self.send_req.Wait() return TracePair( - DOFDesc(self.domain_tag, DISCR_TAG_BASE), + DOFDesc( + 
BoundaryDomainTag(BTAG_PARTITION(self.remote_part_id)), + DISCR_TAG_BASE), interior=self.local_bdry_data, exterior=remote_bdry_data) @@ -572,8 +570,9 @@ class _RankBoundaryCommunicationLazy: def __init__(self, actx: ArrayContext, dcoll: DiscretizationCollection, - domain_tag: BoundaryDomainTag, *, + local_part_id: PartitionID, + remote_part_id: PartitionID, local_bdry_data: ArrayOrContainerT, send_data: ArrayOrContainerT, comm_tag: Optional[Hashable] = None) -> None: @@ -583,10 +582,12 @@ def __init__(self, self.dcoll = dcoll self.array_context = actx - self.bdry_discr = dcoll.discr_from_dd(domain_tag) - self.domain_tag = domain_tag + self.bdry_discr = dcoll.discr_from_dd( + BoundaryDomainTag(BTAG_PARTITION(remote_part_id))) + self.local_part_id = local_part_id + self.remote_part_id = remote_part_id - remote_rank = _remote_rank_from_btag(domain_tag.tag) + remote_rank = dcoll._part_id_helper.get_mpi_rank(remote_part_id) assert remote_rank is not None self.local_bdry_data = local_bdry_data @@ -615,10 +616,13 @@ def communicate_single_array(key, local_bdry_subary): communicate_single_array, self.local_bdry_data) def finish(self): - bdry_conn = self.dcoll._inter_partition_connections[self.domain_tag] + bdry_conn = self.dcoll._inter_partition_connections[ + self.remote_part_id, self.local_part_id] return TracePair( - DOFDesc(self.domain_tag, DISCR_TAG_BASE), + DOFDesc( + BoundaryDomainTag(BTAG_PARTITION(self.remote_part_id)), + DISCR_TAG_BASE), interior=self.local_bdry_data, exterior=bdry_conn(self.remote_data)) @@ -682,19 +686,37 @@ def cross_rank_trace_pairs( # }}} - comm_bdtags = _remote_inter_partition_tags( - dcoll, self_volume_tag=volume_dd.domain_tag.tag) + if dcoll.mpi_communicator is None: + return [] + + rank = dcoll.mpi_communicator.Get_rank() + + local_part_id = dcoll._part_id_helper.make(rank, volume_dd.domain_tag.tag) + + connected_part_ids = _connected_partitions( + dcoll, self_volume_tag=volume_dd.domain_tag.tag, + other_volume_tag=volume_dd.domain_tag.tag) + + remote_part_ids = [ + part_id + for part_id in connected_part_ids + if dcoll._part_id_helper.get_mpi_rank(part_id) != rank] # This asserts that there is only one data exchange per rank, so that # there is no risk of mismatched data reaching the wrong recipient. # (Since we have only a single tag.) 
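    # For example, if two of these boundaries pointed at the same remote rank,
    # both exchanges would be posted with the same MPI tag, and the received
    # messages could end up attributed to the wrong boundary.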
- assert len(comm_bdtags) == len( - {_remote_rank_from_btag(bdtag.tag) for bdtag in comm_bdtags}) + assert len(remote_part_ids) == len( + {dcoll._part_id_helper.get_mpi_rank(part_id) for part_id in remote_part_ids}) if isinstance(ary, Number): # NOTE: Assumes that the same number is passed on every rank - return [TracePair(DOFDesc(bdtag, DISCR_TAG_BASE), interior=ary, exterior=ary) - for bdtag in comm_bdtags] + return [ + TracePair( + DOFDesc( + BoundaryDomainTag(BTAG_PARTITION(remote_part_id)), + DISCR_TAG_BASE), + interior=ary, exterior=ary) + for remote_part_id in remote_part_ids] actx = get_container_context_recursively(ary) assert actx is not None @@ -706,19 +728,21 @@ def cross_rank_trace_pairs( else: rbc = _RankBoundaryCommunicationEager - def start_comm(bdtag): - local_bdry_data = project( - dcoll, - DOFDesc(VolumeDomainTag(bdtag.volume_tag), DISCR_TAG_BASE), - DOFDesc(bdtag, DISCR_TAG_BASE), - ary) + def start_comm(remote_part_id): + bdtag = BoundaryDomainTag(BTAG_PARTITION(remote_part_id)) - return rbc(actx, dcoll, bdtag, + local_bdry_data = project(dcoll, volume_dd, bdtag, ary) + + return rbc(actx, dcoll, + local_part_id=local_part_id, + remote_part_id=remote_part_id, local_bdry_data=local_bdry_data, send_data=local_bdry_data, comm_tag=comm_tag) - rank_bdry_communcators = [start_comm(bdtag) for bdtag in comm_bdtags] + rank_bdry_communcators = [ + start_comm(remote_part_id) + for remote_part_id in remote_part_ids] return [rc.finish() for rc in rank_bdry_communcators] # }}} @@ -758,16 +782,27 @@ def cross_rank_inter_volume_trace_pairs( # }}} - comm_bdtags = _remote_inter_partition_tags( - dcoll, - self_volume_tag=self_volume_dd.domain_tag.tag, + if dcoll.mpi_communicator is None: + return [] + + rank = dcoll.mpi_communicator.Get_rank() + + local_part_id = dcoll._part_id_helper.make(rank, self_volume_dd.domain_tag.tag) + + connected_part_ids = _connected_partitions( + dcoll, self_volume_tag=self_volume_dd.domain_tag.tag, other_volume_tag=other_volume_dd.domain_tag.tag) + remote_part_ids = [ + part_id + for part_id in connected_part_ids + if dcoll._part_id_helper.get_mpi_rank(part_id) != rank] + # This asserts that there is only one data exchange per rank, so that # there is no risk of mismatched data reaching the wrong recipient. # (Since we have only a single tag.) 
- assert len(comm_bdtags) == len( - {_remote_rank_from_btag(bdtag.tag) for bdtag in comm_bdtags}) + assert len(remote_part_ids) == len( + {dcoll._part_id_helper.get_mpi_rank(part_id) for part_id in remote_part_ids}) actx = get_container_context_recursively(self_ary) assert actx is not None @@ -779,25 +814,23 @@ def cross_rank_inter_volume_trace_pairs( else: rbc = _RankBoundaryCommunicationEager - def start_comm(bdtag): - assert isinstance(bdtag.tag, BTAG_MULTIVOL_PARTITION) - self_volume_dd = DOFDesc( - VolumeDomainTag(bdtag.volume_tag), DISCR_TAG_BASE) - other_volume_dd = DOFDesc( - VolumeDomainTag(bdtag.tag.other_volume_tag), DISCR_TAG_BASE) + def start_comm(remote_part_id): + bdtag = BoundaryDomainTag(BTAG_PARTITION(remote_part_id)) local_bdry_data = project(dcoll, self_volume_dd, bdtag, self_ary) send_data = project(dcoll, other_volume_dd, - BTAG_MULTIVOL_PARTITION( - other_rank=bdtag.tag.other_rank, - other_volume_tag=bdtag.volume_tag), other_ary) + BTAG_PARTITION(local_part_id), other_ary) - return rbc(actx, dcoll, bdtag, + return rbc(actx, dcoll, + local_part_id=local_part_id, + remote_part_id=remote_part_id, local_bdry_data=local_bdry_data, send_data=send_data, comm_tag=comm_tag) - rank_bdry_communcators = [start_comm(bdtag) for bdtag in comm_bdtags] + rank_bdry_communcators = [ + start_comm(remote_part_id) + for remote_part_id in remote_part_ids] return [rc.finish() for rc in rank_bdry_communcators] # }}} diff --git a/test/test_grudge.py b/test/test_grudge.py index 786b5a0e2..c0a562cef 100644 --- a/test/test_grudge.py +++ b/test/test_grudge.py @@ -1083,22 +1083,17 @@ def test_multi_volume(actx_factory): nelements_per_axis=(8,)*dim, order=4) meg, = mesh.groups - part_per_element = ( - mesh.vertices[0, meg.vertex_indices[:, 0]] > 0).astype(np.int32) + x = mesh.vertices[0, meg.vertex_indices] + x_elem_avg = np.sum(x, axis=1)/x.shape[1] + volume_per_element = (x_elem_avg > 0).astype(np.int32) + + from meshmode.distributed import membership_list_to_map + volume_to_elements = membership_list_to_map(volume_per_element) from meshmode.mesh.processing import partition_mesh - from grudge.discretization import relabel_partitions - parts = { - i: relabel_partitions( - partition_mesh(mesh, part_per_element, i)[0], - self_rank=0, - part_nr_to_rank_and_vol_tag={ - 0: (0, 0), - 1: (0, 1), - }) - for i in range(2)} - - make_discretization_collection(actx, parts, order=4) + volume_to_mesh = partition_mesh(mesh, volume_to_elements) + + make_discretization_collection(actx, volume_to_mesh, order=4) # }}} From ea570ee792f30d3273a6dc1fade10f5aa4a126e8 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Mon, 21 Mar 2022 14:23:46 -0500 Subject: [PATCH 02/30] temporarily change meshmode branch --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6d8841e9a..d2383ba1d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ git+https://github.com/inducer/leap.git#egg=leap git+https://github.com/inducer/meshpy.git#egg=meshpy git+https://github.com/inducer/modepy.git#egg=modepy git+https://github.com/inducer/arraycontext.git#egg=arraycontext -git+https://github.com/inducer/meshmode.git@generic-part-bdry#egg=meshmode +git+https://github.com/majosm/meshmode.git@mpi-distribute#egg=meshmode git+https://github.com/inducer/pyvisfile.git#egg=pyvisfile git+https://github.com/inducer/pymetis.git#egg=pymetis git+https://github.com/illinois-ceesd/logpyle.git#egg=logpyle From 7cc98f65b78f2cb82ddf8b30dada5c96926327ee Mon Sep 17 00:00:00 
2001 From: Matthew Smith Date: Tue, 22 Mar 2022 12:31:21 -0500 Subject: [PATCH 03/30] fix docs --- grudge/discretization.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index 4cc7fe639..64cc0d930 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -8,10 +8,10 @@ .. currentmodule:: grudge.discretization -Internal things that are visble due to type annotations -------------------------------------------------------- +Internal things that are visible due to type annotations +-------------------------------------------------------- -.. class:: _InterPartitionConnectionPair +.. class:: _PartitionIDHelper """ __copyright__ = """ @@ -152,6 +152,11 @@ class DiscretizationCollection: (volume, interior facets, boundaries) and associated element groups. + .. note:: + + Do not call the constructor directly. Use + :func:`make_discretization_collection` instead. + .. autoattribute:: dim .. autoattribute:: ambient_dim .. autoattribute:: real_dtype From 09bac09d98f54dce412fac6d0b4f52babce4916f Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Wed, 29 Jun 2022 15:58:14 -0500 Subject: [PATCH 04/30] promote to part ID instead of using part ID helper --- grudge/discretization.py | 156 +++++++++++++++++---------------------- grudge/trace_pair.py | 32 ++++---- 2 files changed, 83 insertions(+), 105 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index b1532745b..0c55d646e 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -7,11 +7,6 @@ .. autofunction:: make_discretization_collection .. currentmodule:: grudge.discretization - -Internal things that are visible due to type annotations --------------------------------------------------------- - -.. class:: _PartitionIDHelper """ __copyright__ = """ @@ -39,11 +34,11 @@ THE SOFTWARE. 
""" -from typing import Mapping, Optional, Union, Tuple, Callable, TYPE_CHECKING, Any +from typing import Mapping, Optional, Union, Tuple, TYPE_CHECKING, Any from pytools import memoize_method, single_valued -from dataclasses import dataclass +from dataclasses import replace from grudge.dof_desc import ( VTAG_ALL, @@ -69,7 +64,7 @@ make_face_restriction, DiscretizationConnection ) -from meshmode.mesh import Mesh, BTAG_PARTITION, PartitionID +from meshmode.mesh import Mesh, BTAG_PARTITION from meshmode.dof_array import DOFArray from warnings import warn @@ -78,6 +73,37 @@ import mpi4py.MPI +PartitionID = Tuple[VolumeTag, int] + + +# {{{ partition ID normalization + +def _normalize_mesh_part_ids(mesh, promote_to_part_id): + facial_adjacency_groups = mesh.facial_adjacency_groups + + new_facial_adjacency_groups = [] + + from meshmode.mesh import InterPartitionAdjacencyGroup + for grp_list in facial_adjacency_groups: + new_grp_list = [] + for fagrp in grp_list: + # FIXME: Will also need to replace part_id attribute (not added yet + # in this version) + if isinstance(fagrp, InterPartitionAdjacencyGroup): + new_fagrp = replace( + fagrp, + boundary_tag=BTAG_PARTITION( + promote_to_part_id(fagrp.boundary_tag.part_id))) + else: + new_fagrp = fagrp + new_grp_list.append(new_fagrp) + new_facial_adjacency_groups.append(new_grp_list) + + return mesh.copy(facial_adjacency_groups=new_facial_adjacency_groups) + +# }}} + + # {{{ discr_tag_to_group_factory normalization def _normalize_discr_tag_to_group_factory( @@ -125,27 +151,6 @@ def _normalize_discr_tag_to_group_factory( # }}} -# {{{ _PartitionIDHelper - -@dataclass(frozen=True, init=True, eq=True) -class _PartitionIDHelper: - """ - Provides a unified interface for creating and inspecting partition identifiers - that does not depend on whether the problem is distributed, multi-volume, etc. - - .. attribute:: make - .. attribute:: get_mpi_rank - .. attribute:: get_volume - - .. 
automethod:: __init__ - """ - make: Callable[[Optional[int], VolumeTag], PartitionID] - get_mpi_rank: Callable[[PartitionID], Optional[int]] - get_volume: Callable[[PartitionID], VolumeTag] - -# }}} - - class DiscretizationCollection: """A collection of discretizations, defined on the same underlying :class:`~meshmode.mesh.Mesh`, corresponding to various mesh entities @@ -187,7 +192,6 @@ def __init__(self, array_context: ArrayContext, inter_partition_connections: Optional[ Mapping[Tuple[PartitionID, PartitionID], DiscretizationConnection]] = None, - part_id_helper: Optional[_PartitionIDHelper] = None, ) -> None: """ :arg discr_tag_to_group_factory: A mapping from discretization tags @@ -237,6 +241,16 @@ def __init__(self, array_context: ArrayContext, DeprecationWarning, stacklevel=2) mesh = volume_discrs + + if mpi_communicator is not None: + def promote_to_part_id(key): + return (VTAG_ALL, key) + else: + def promote_to_part_id(key): + return (VTAG_ALL, None) + + mesh = _normalize_mesh_part_ids(mesh, promote_to_part_id) + discr_tag_to_group_factory = _normalize_discr_tag_to_group_factory( dim=mesh.dim, discr_tag_to_group_factory=discr_tag_to_group_factory, @@ -254,21 +268,6 @@ def __init__(self, array_context: ArrayContext, raise TypeError("may not pass inter_partition_connections when " "DiscretizationCollection constructor is called in " "legacy mode") - if part_id_helper is not None: - raise TypeError("may not pass part_id_helper when " - "DiscretizationCollection constructor is called in " - "legacy mode") - - if mpi_communicator is not None: - part_id_helper = _PartitionIDHelper( - make=lambda rank, vtag: rank, - get_mpi_rank=lambda part_id: part_id, - get_volume=lambda part_id: VTAG_ALL) - else: - part_id_helper = _PartitionIDHelper( - make=lambda rank, vtag: None, - get_mpi_rank=lambda part_id: None, - get_volume=lambda part_id: VTAG_ALL) self._inter_partition_connections = \ _set_up_inter_partition_connections( @@ -276,9 +275,7 @@ def __init__(self, array_context: ArrayContext, mpi_communicator=mpi_communicator, volume_discrs=volume_discrs, base_group_factory=( - discr_tag_to_group_factory[DISCR_TAG_BASE]), - part_id_helper=part_id_helper) - self._part_id_helper = part_id_helper + discr_tag_to_group_factory[DISCR_TAG_BASE])) # }}} else: @@ -289,13 +286,8 @@ def __init__(self, array_context: ArrayContext, raise TypeError("inter_partition_connections must be passed when " "DiscretizationCollection constructor is called in " "'modern' mode") - if part_id_helper is None: - raise TypeError("part_id_helper must be passed when " - "DiscretizationCollection constructor is called in " - "'modern' mode") self._inter_partition_connections = inter_partition_connections - self._part_id_helper = part_id_helper self._volume_discrs = volume_discrs @@ -782,7 +774,6 @@ def _set_up_inter_partition_connections( mpi_communicator: Optional["mpi4py.MPI.Intracomm"], volume_discrs: Mapping[VolumeTag, Discretization], base_group_factory: ElementGroupFactory, - part_id_helper: _PartitionIDHelper, ) -> Mapping[ Tuple[PartitionID, PartitionID], DiscretizationConnection]: @@ -804,11 +795,10 @@ def get_part_bdry_restriction(self_part_id, other_part_id): (self_part_id, other_part_id), None) if cached_result is not None: return cached_result - self_vtag = part_id_helper.get_volume(self_part_id) return cached_part_bdry_restrictions.setdefault( (self_part_id, other_part_id), make_face_restriction( - array_context, volume_discrs[self_vtag], + array_context, volume_discrs[self_part_id[0]], base_group_factory, 
boundary_tag=BTAG_PARTITION(other_part_id))) @@ -819,12 +809,12 @@ def get_part_bdry_restriction(self_part_id, other_part_id): irbis = [] for vtag, volume_discr in volume_discrs.items(): - part_id = part_id_helper.make(rank, vtag) + part_id = (vtag, rank) connected_part_ids = get_connected_partitions(volume_discr.mesh) for connected_part_id in connected_part_ids: bdry_restr = get_part_bdry_restriction(part_id, connected_part_id) - if part_id_helper.get_mpi_rank(connected_part_id) == rank: + if connected_part_id[1] == rank: # {{{ rank-local interface between multiple volumes rev_bdry_restr = get_part_bdry_restriction( @@ -852,8 +842,7 @@ def get_part_bdry_restriction(self_part_id, other_part_id): InterRankBoundaryInfo( local_part_id=part_id, remote_part_id=connected_part_id, - remote_rank=part_id_helper.get_mpi_rank( - connected_part_id), + remote_rank=connected_part_id[1], local_boundary_connection=bdry_restr)) # }}} @@ -965,40 +954,35 @@ def make_discretization_collection( del order - volume_discrs = { - vtag: ( - Discretization( - array_context, mesh_or_discr, - discr_tag_to_group_factory[DISCR_TAG_BASE]) - if isinstance(mesh_or_discr, Mesh) else mesh_or_discr) - for vtag, mesh_or_discr in volumes.items()} - mpi_communicator = getattr(array_context, "mpi_communicator", None) if VTAG_ALL not in volumes.keys(): # Multi-volume if mpi_communicator is not None: - part_id_helper = _PartitionIDHelper( - make=lambda rank, vtag: (rank, vtag), - get_mpi_rank=lambda part_id: part_id[0], - get_volume=lambda part_id: part_id[1]) + def promote_to_part_id(key): + return key else: - part_id_helper = _PartitionIDHelper( - make=lambda rank, vtag: vtag, - get_mpi_rank=lambda part_id: None, - get_volume=lambda part_id: part_id) + def promote_to_part_id(key): + return (key, None) else: # Single-volume if mpi_communicator is not None: - part_id_helper = _PartitionIDHelper( - make=lambda rank, vtag: rank, - get_mpi_rank=lambda part_id: part_id, - get_volume=lambda part_id: VTAG_ALL) + def promote_to_part_id(key): + return (VTAG_ALL, key) else: - part_id_helper = _PartitionIDHelper( - make=lambda rank, vtag: None, - get_mpi_rank=lambda part_id: None, - get_volume=lambda part_id: VTAG_ALL) + def promote_to_part_id(key): + return (VTAG_ALL, None) + + if any( + isinstance(mesh_or_discr, Discretization) + for mesh_or_discr in volumes.values()): + raise NotImplementedError("Doesn't work at the moment") + + volume_discrs = { + vtag: Discretization( + array_context, _normalize_mesh_part_ids(mesh, promote_to_part_id), + discr_tag_to_group_factory[DISCR_TAG_BASE]) + for vtag, mesh in volumes.items()} return DiscretizationCollection( array_context=array_context, @@ -1008,9 +992,7 @@ def make_discretization_collection( array_context=array_context, mpi_communicator=mpi_communicator, volume_discrs=volume_discrs, - base_group_factory=discr_tag_to_group_factory[DISCR_TAG_BASE], - part_id_helper=part_id_helper), - part_id_helper=part_id_helper) + base_group_factory=discr_tag_to_group_factory[DISCR_TAG_BASE])) # }}} diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index 6719a6709..5da043243 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -78,10 +78,10 @@ from pytools import memoize_on_first_arg from pytools.obj_array import obj_array_vectorize -from grudge.discretization import DiscretizationCollection +from grudge.discretization import DiscretizationCollection, PartitionID from grudge.projection import project -from meshmode.mesh import BTAG_PARTITION, PartitionID +from meshmode.mesh import 
BTAG_PARTITION import numpy as np @@ -383,8 +383,8 @@ def local_inter_volume_trace_pairs( if dcoll.mpi_communicator is not None else None) - self_part_id = dcoll._part_id_helper.make(rank, self_volume_dd.domain_tag.tag) - other_part_id = dcoll._part_id_helper.make(rank, other_volume_dd.domain_tag.tag) + self_part_id = (self_volume_dd.domain_tag.tag, rank) + other_part_id = (other_volume_dd.domain_tag.tag, rank) self_trace_dd = self_volume_dd.trace(BTAG_PARTITION(other_part_id)) other_trace_dd = other_volume_dd.trace(BTAG_PARTITION(self_part_id)) @@ -452,10 +452,8 @@ def _connected_partitions( connected_part_id for connected_part_id, part_id in dcoll._inter_partition_connections.keys() if ( - dcoll._part_id_helper.get_volume(part_id) == self_volume_tag - and ( - dcoll._part_id_helper.get_volume(connected_part_id) - == other_volume_tag))] + part_id[0] == self_volume_tag + and connected_part_id[0] == other_volume_tag)] return result @@ -507,7 +505,7 @@ def __init__(self, comm = dcoll.mpi_communicator assert comm is not None - remote_rank = dcoll._part_id_helper.get_mpi_rank(remote_part_id) + remote_rank = remote_part_id[1] assert remote_rank is not None self.dcoll = dcoll @@ -588,7 +586,7 @@ def __init__(self, self.local_part_id = local_part_id self.remote_part_id = remote_part_id - remote_rank = dcoll._part_id_helper.get_mpi_rank(remote_part_id) + remote_rank = remote_part_id[1] assert remote_rank is not None self.local_bdry_data = local_bdry_data @@ -693,7 +691,7 @@ def cross_rank_trace_pairs( rank = dcoll.mpi_communicator.Get_rank() - local_part_id = dcoll._part_id_helper.make(rank, volume_dd.domain_tag.tag) + local_part_id = (volume_dd.domain_tag.tag, rank) connected_part_ids = _connected_partitions( dcoll, self_volume_tag=volume_dd.domain_tag.tag, @@ -702,13 +700,12 @@ def cross_rank_trace_pairs( remote_part_ids = [ part_id for part_id in connected_part_ids - if dcoll._part_id_helper.get_mpi_rank(part_id) != rank] + if part_id[1] != rank] # This asserts that there is only one data exchange per rank, so that # there is no risk of mismatched data reaching the wrong recipient. # (Since we have only a single tag.) - assert len(remote_part_ids) == len( - {dcoll._part_id_helper.get_mpi_rank(part_id) for part_id in remote_part_ids}) + assert len(remote_part_ids) == len({part_id[1] for part_id in remote_part_ids}) if isinstance(ary, Number): # NOTE: Assumes that the same number is passed on every rank @@ -789,7 +786,7 @@ def cross_rank_inter_volume_trace_pairs( rank = dcoll.mpi_communicator.Get_rank() - local_part_id = dcoll._part_id_helper.make(rank, self_volume_dd.domain_tag.tag) + local_part_id = (self_volume_dd.domain_tag.tag, rank) connected_part_ids = _connected_partitions( dcoll, self_volume_tag=self_volume_dd.domain_tag.tag, @@ -798,13 +795,12 @@ def cross_rank_inter_volume_trace_pairs( remote_part_ids = [ part_id for part_id in connected_part_ids - if dcoll._part_id_helper.get_mpi_rank(part_id) != rank] + if part_id[1] != rank] # This asserts that there is only one data exchange per rank, so that # there is no risk of mismatched data reaching the wrong recipient. # (Since we have only a single tag.) 
- assert len(remote_part_ids) == len( - {dcoll._part_id_helper.get_mpi_rank(part_id) for part_id in remote_part_ids}) + assert len(remote_part_ids) == len({part_id[1] for part_id in remote_part_ids}) actx = get_container_context_recursively(self_ary) assert actx is not None From c46a6b58b961890f50877b0cab6ae285ed13079b Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Wed, 29 Jun 2022 16:03:57 -0500 Subject: [PATCH 05/30] clarify part vs. partition terminology --- grudge/discretization.py | 42 +++++++++++++++++++--------------------- grudge/trace_pair.py | 35 ++++++++++++++++----------------- 2 files changed, 37 insertions(+), 40 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index 0c55d646e..413e52707 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -73,23 +73,21 @@ import mpi4py.MPI -PartitionID = Tuple[VolumeTag, int] +PartID = Tuple[VolumeTag, int] -# {{{ partition ID normalization +# {{{ part ID normalization def _normalize_mesh_part_ids(mesh, promote_to_part_id): facial_adjacency_groups = mesh.facial_adjacency_groups new_facial_adjacency_groups = [] - from meshmode.mesh import InterPartitionAdjacencyGroup + from meshmode.mesh import InterPartAdjacencyGroup for grp_list in facial_adjacency_groups: new_grp_list = [] for fagrp in grp_list: - # FIXME: Will also need to replace part_id attribute (not added yet - # in this version) - if isinstance(fagrp, InterPartitionAdjacencyGroup): + if isinstance(fagrp, InterPartAdjacencyGroup): new_fagrp = replace( fagrp, boundary_tag=BTAG_PARTITION( @@ -189,8 +187,8 @@ def __init__(self, array_context: ArrayContext, discr_tag_to_group_factory: Optional[ Mapping[DiscretizationTag, ElementGroupFactory]] = None, mpi_communicator: Optional["mpi4py.MPI.Intracomm"] = None, - inter_partition_connections: Optional[ - Mapping[Tuple[PartitionID, PartitionID], + inter_part_connections: Optional[ + Mapping[Tuple[PartID, PartID], DiscretizationConnection]] = None, ) -> None: """ @@ -264,13 +262,13 @@ def promote_to_part_id(key): del mesh - if inter_partition_connections is not None: - raise TypeError("may not pass inter_partition_connections when " + if inter_part_connections is not None: + raise TypeError("may not pass inter_part_connections when " "DiscretizationCollection constructor is called in " "legacy mode") - self._inter_partition_connections = \ - _set_up_inter_partition_connections( + self._inter_part_connections = \ + _set_up_inter_part_connections( array_context=self._setup_actx, mpi_communicator=mpi_communicator, volume_discrs=volume_discrs, @@ -282,12 +280,12 @@ def promote_to_part_id(key): assert discr_tag_to_group_factory is not None self._discr_tag_to_group_factory = discr_tag_to_group_factory - if inter_partition_connections is None: - raise TypeError("inter_partition_connections must be passed when " + if inter_part_connections is None: + raise TypeError("inter_part_connections must be passed when " "DiscretizationCollection constructor is called in " "'modern' mode") - self._inter_partition_connections = inter_partition_connections + self._inter_part_connections = inter_part_connections self._volume_discrs = volume_discrs @@ -769,16 +767,16 @@ def normal(self, dd): # {{{ distributed/multi-volume setup -def _set_up_inter_partition_connections( +def _set_up_inter_part_connections( array_context: ArrayContext, mpi_communicator: Optional["mpi4py.MPI.Intracomm"], volume_discrs: Mapping[VolumeTag, Discretization], base_group_factory: ElementGroupFactory, ) -> Mapping[ - Tuple[PartitionID, 
PartitionID], + Tuple[PartID, PartID], DiscretizationConnection]: - from meshmode.distributed import (get_connected_partitions, + from meshmode.distributed import (get_connected_parts, make_remote_group_infos, InterRankBoundaryInfo, MPIBoundaryCommSetupHelper) @@ -787,7 +785,7 @@ def _set_up_inter_partition_connections( # Save boundary restrictions as they're created to avoid potentially creating # them twice in the loop below cached_part_bdry_restrictions: Mapping[ - Tuple[PartitionID, PartitionID], + Tuple[PartID, PartID], DiscretizationConnection] = {} def get_part_bdry_restriction(self_part_id, other_part_id): @@ -803,14 +801,14 @@ def get_part_bdry_restriction(self_part_id, other_part_id): boundary_tag=BTAG_PARTITION(other_part_id))) inter_part_conns: Mapping[ - Tuple[PartitionID, PartitionID], + Tuple[PartID, PartID], DiscretizationConnection] = {} irbis = [] for vtag, volume_discr in volume_discrs.items(): part_id = (vtag, rank) - connected_part_ids = get_connected_partitions(volume_discr.mesh) + connected_part_ids = get_connected_parts(volume_discr.mesh) for connected_part_id in connected_part_ids: bdry_restr = get_part_bdry_restriction(part_id, connected_part_id) @@ -988,7 +986,7 @@ def promote_to_part_id(key): array_context=array_context, volume_discrs=volume_discrs, discr_tag_to_group_factory=discr_tag_to_group_factory, - inter_partition_connections=_set_up_inter_partition_connections( + inter_part_connections=_set_up_inter_part_connections( array_context=array_context, mpi_communicator=mpi_communicator, volume_discrs=volume_discrs, diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index 5da043243..e110ae43d 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -78,7 +78,7 @@ from pytools import memoize_on_first_arg from pytools.obj_array import obj_array_vectorize -from grudge.discretization import DiscretizationCollection, PartitionID +from grudge.discretization import DiscretizationCollection, PartID from grudge.projection import project from meshmode.mesh import BTAG_PARTITION @@ -397,8 +397,7 @@ def local_inter_volume_trace_pairs( other_trace = project( dcoll, other_volume_dd, other_trace_dd, other_ary) - other_to_self = dcoll._inter_partition_connections[ - other_part_id, self_part_id] + other_to_self = dcoll._inter_part_connections[other_part_id, self_part_id] def get_opposite_trace(el): if isinstance(el, Number): @@ -443,14 +442,14 @@ def update_for_type(self, key_hash, key: Type[Any]): @memoize_on_first_arg -def _connected_partitions( +def _connected_parts( dcoll: DiscretizationCollection, self_volume_tag: VolumeTag, other_volume_tag: VolumeTag - ) -> Sequence[PartitionID]: - result: List[PartitionID] = [ + ) -> Sequence[PartID]: + result: List[PartID] = [ connected_part_id - for connected_part_id, part_id in dcoll._inter_partition_connections.keys() + for connected_part_id, part_id in dcoll._inter_part_connections.keys() if ( part_id[0] == self_volume_tag and connected_part_id[0] == other_volume_tag)] @@ -496,8 +495,8 @@ def __init__(self, actx: ArrayContext, dcoll: DiscretizationCollection, *, - local_part_id: PartitionID, - remote_part_id: PartitionID, + local_part_id: PartID, + remote_part_id: PartID, local_bdry_data: ArrayOrContainer, send_data: ArrayOrContainer, comm_tag: Optional[Hashable] = None): @@ -545,7 +544,7 @@ def finish(self): self.recv_data_np, self.array_context) unswapped_remote_bdry_data = unflatten(self.local_bdry_data, recv_data_flat, self.array_context) - bdry_conn = self.dcoll._inter_partition_connections[ + bdry_conn = 
self.dcoll._inter_part_connections[ self.remote_part_id, self.local_part_id] remote_bdry_data = bdry_conn(unswapped_remote_bdry_data) @@ -570,8 +569,8 @@ def __init__(self, actx: ArrayContext, dcoll: DiscretizationCollection, *, - local_part_id: PartitionID, - remote_part_id: PartitionID, + local_part_id: PartID, + remote_part_id: PartID, local_bdry_data: ArrayOrContainer, send_data: ArrayOrContainer, comm_tag: Optional[Hashable] = None) -> None: @@ -616,7 +615,7 @@ def communicate_single_array(key, local_bdry_subary): communicate_single_array, self.local_bdry_data) def finish(self): - bdry_conn = self.dcoll._inter_partition_connections[ + bdry_conn = self.dcoll._inter_part_connections[ self.remote_part_id, self.local_part_id] return TracePair( @@ -639,9 +638,9 @@ def cross_rank_trace_pairs( r"""Get a :class:`list` of *ary* trace pairs for each partition boundary. For each partition boundary, the field data values in *ary* are - communicated to/from the neighboring partition. Presumably, this - communication is MPI (but strictly speaking, may not be, and this - routine is agnostic to the underlying communication). + communicated to/from the neighboring part. Presumably, this communication + is MPI (but strictly speaking, may not be, and this routine is agnostic to + the underlying communication). For each face on each partition boundary, a :class:`TracePair` is created with the locally, and @@ -693,7 +692,7 @@ def cross_rank_trace_pairs( local_part_id = (volume_dd.domain_tag.tag, rank) - connected_part_ids = _connected_partitions( + connected_part_ids = _connected_parts( dcoll, self_volume_tag=volume_dd.domain_tag.tag, other_volume_tag=volume_dd.domain_tag.tag) @@ -788,7 +787,7 @@ def cross_rank_inter_volume_trace_pairs( local_part_id = (self_volume_dd.domain_tag.tag, rank) - connected_part_ids = _connected_partitions( + connected_part_ids = _connected_parts( dcoll, self_volume_tag=self_volume_dd.domain_tag.tag, other_volume_tag=other_volume_dd.domain_tag.tag) From 06ff8f6b9d01a45c9f527730574f605785152efc Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Wed, 29 Jun 2022 16:09:39 -0500 Subject: [PATCH 06/30] account for explicit part_id attribute in InterPartAdjacencyGroup --- grudge/discretization.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index 413e52707..379ea7d69 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -88,10 +88,11 @@ def _normalize_mesh_part_ids(mesh, promote_to_part_id): new_grp_list = [] for fagrp in grp_list: if isinstance(fagrp, InterPartAdjacencyGroup): + part_id = promote_to_part_id(fagrp.boundary_tag.part_id) new_fagrp = replace( fagrp, - boundary_tag=BTAG_PARTITION( - promote_to_part_id(fagrp.boundary_tag.part_id))) + boundary_tag=BTAG_PARTITION(part_id), + part_id=part_id) else: new_fagrp = fagrp new_grp_list.append(new_fagrp) From 49d627c868cf7f4d67a6923cfd60e766f94c676e Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Sun, 3 Jul 2022 23:40:13 -0500 Subject: [PATCH 07/30] use dataclass instead of tuple for PartID --- grudge/discretization.py | 84 +++++++++++++++++++++++++++++----------- grudge/trace_pair.py | 24 ++++++------ 2 files changed, 74 insertions(+), 34 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index 379ea7d69..b62b96c76 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -7,6 +7,7 @@ .. autofunction:: make_discretization_collection .. currentmodule:: grudge.discretization +.. 
autoclass:: PartID """ __copyright__ = """ @@ -38,7 +39,7 @@ from pytools import memoize_method, single_valued -from dataclasses import replace +from dataclasses import dataclass, replace from grudge.dof_desc import ( VTAG_ALL, @@ -73,12 +74,26 @@ import mpi4py.MPI -PartID = Tuple[VolumeTag, int] +@dataclass(frozen=True) +class PartID: + """Unique identifier for a piece of a partitioned mesh. + + .. attribute:: volume_tag + + The volume of the part. + + .. attribute:: rank + + The (optional) MPI rank of the part. + + """ + volume_tag: VolumeTag + rank: Optional[int] = None # {{{ part ID normalization -def _normalize_mesh_part_ids(mesh, promote_to_part_id): +def _normalize_mesh_part_ids(mesh, as_part_id): facial_adjacency_groups = mesh.facial_adjacency_groups new_facial_adjacency_groups = [] @@ -88,7 +103,7 @@ def _normalize_mesh_part_ids(mesh, promote_to_part_id): new_grp_list = [] for fagrp in grp_list: if isinstance(fagrp, InterPartAdjacencyGroup): - part_id = promote_to_part_id(fagrp.boundary_tag.part_id) + part_id = as_part_id(fagrp.part_id) new_fagrp = replace( fagrp, boundary_tag=BTAG_PARTITION(part_id), @@ -242,13 +257,21 @@ def __init__(self, array_context: ArrayContext, mesh = volume_discrs if mpi_communicator is not None: - def promote_to_part_id(key): - return (VTAG_ALL, key) + # Accept PartID or rank + def as_part_id(mesh_part_id): + if isinstance(mesh_part_id, PartID): + return mesh_part_id + elif isinstance(mesh_part_id, int): + return PartID(VTAG_ALL, mesh_part_id) + else: + raise TypeError( + f"Unable to convert {mesh_part_id} to PartID.") else: - def promote_to_part_id(key): - return (VTAG_ALL, None) + # Shouldn't be called + def as_part_id(mesh_part_id): + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") - mesh = _normalize_mesh_part_ids(mesh, promote_to_part_id) + mesh = _normalize_mesh_part_ids(mesh, as_part_id) discr_tag_to_group_factory = _normalize_discr_tag_to_group_factory( dim=mesh.dim, @@ -797,7 +820,7 @@ def get_part_bdry_restriction(self_part_id, other_part_id): return cached_part_bdry_restrictions.setdefault( (self_part_id, other_part_id), make_face_restriction( - array_context, volume_discrs[self_part_id[0]], + array_context, volume_discrs[self_part_id.volume_tag], base_group_factory, boundary_tag=BTAG_PARTITION(other_part_id))) @@ -808,12 +831,12 @@ def get_part_bdry_restriction(self_part_id, other_part_id): irbis = [] for vtag, volume_discr in volume_discrs.items(): - part_id = (vtag, rank) + part_id = PartID(vtag, rank) connected_part_ids = get_connected_parts(volume_discr.mesh) for connected_part_id in connected_part_ids: bdry_restr = get_part_bdry_restriction(part_id, connected_part_id) - if connected_part_id[1] == rank: + if connected_part_id.rank == rank: # {{{ rank-local interface between multiple volumes rev_bdry_restr = get_part_bdry_restriction( @@ -841,7 +864,7 @@ def get_part_bdry_restriction(self_part_id, other_part_id): InterRankBoundaryInfo( local_part_id=part_id, remote_part_id=connected_part_id, - remote_rank=connected_part_id[1], + remote_rank=connected_part_id.rank, local_boundary_connection=bdry_restr)) # }}} @@ -958,19 +981,36 @@ def make_discretization_collection( if VTAG_ALL not in volumes.keys(): # Multi-volume if mpi_communicator is not None: - def promote_to_part_id(key): - return key + # Accept PartID + def as_part_id(mesh_part_id): + if isinstance(mesh_part_id, PartID): + return mesh_part_id + else: + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") else: - def promote_to_part_id(key): - return (key, 
None) + # Accept PartID or volume tag + def as_part_id(mesh_part_id): + if isinstance(mesh_part_id, PartID): + return mesh_part_id + elif mesh_part_id in volumes.keys(): + return PartID(mesh_part_id) + else: + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") else: # Single-volume if mpi_communicator is not None: - def promote_to_part_id(key): - return (VTAG_ALL, key) + # Accept PartID or rank + def as_part_id(mesh_part_id): + if isinstance(mesh_part_id, PartID): + return mesh_part_id + elif isinstance(mesh_part_id, int): + return PartID(VTAG_ALL, mesh_part_id) + else: + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") else: - def promote_to_part_id(key): - return (VTAG_ALL, None) + # Shouldn't be called + def as_part_id(mesh_part_id): + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") if any( isinstance(mesh_or_discr, Discretization) @@ -979,7 +1019,7 @@ def promote_to_part_id(key): volume_discrs = { vtag: Discretization( - array_context, _normalize_mesh_part_ids(mesh, promote_to_part_id), + array_context, _normalize_mesh_part_ids(mesh, as_part_id), discr_tag_to_group_factory[DISCR_TAG_BASE]) for vtag, mesh in volumes.items()} diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index ef0e4cc01..d06fd13ea 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -383,8 +383,8 @@ def local_inter_volume_trace_pairs( if dcoll.mpi_communicator is not None else None) - self_part_id = (self_volume_dd.domain_tag.tag, rank) - other_part_id = (other_volume_dd.domain_tag.tag, rank) + self_part_id = PartID(self_volume_dd.domain_tag.tag, rank) + other_part_id = PartID(other_volume_dd.domain_tag.tag, rank) self_trace_dd = self_volume_dd.trace(BTAG_PARTITION(other_part_id)) other_trace_dd = other_volume_dd.trace(BTAG_PARTITION(self_part_id)) @@ -451,8 +451,8 @@ def _connected_parts( connected_part_id for connected_part_id, part_id in dcoll._inter_part_connections.keys() if ( - part_id[0] == self_volume_tag - and connected_part_id[0] == other_volume_tag)] + part_id.volume_tag == self_volume_tag + and connected_part_id.volume_tag == other_volume_tag)] return result @@ -504,7 +504,7 @@ def __init__(self, comm = dcoll.mpi_communicator assert comm is not None - remote_rank = remote_part_id[1] + remote_rank = remote_part_id.rank assert remote_rank is not None self.dcoll = dcoll @@ -585,7 +585,7 @@ def __init__(self, self.local_part_id = local_part_id self.remote_part_id = remote_part_id - remote_rank = remote_part_id[1] + remote_rank = remote_part_id.rank assert remote_rank is not None self.local_bdry_data = local_bdry_data @@ -690,7 +690,7 @@ def cross_rank_trace_pairs( rank = dcoll.mpi_communicator.Get_rank() - local_part_id = (volume_dd.domain_tag.tag, rank) + local_part_id = PartID(volume_dd.domain_tag.tag, rank) connected_part_ids = _connected_parts( dcoll, self_volume_tag=volume_dd.domain_tag.tag, @@ -699,12 +699,12 @@ def cross_rank_trace_pairs( remote_part_ids = [ part_id for part_id in connected_part_ids - if part_id[1] != rank] + if part_id.rank != rank] # This asserts that there is only one data exchange per rank, so that # there is no risk of mismatched data reaching the wrong recipient. # (Since we have only a single tag.) 
- assert len(remote_part_ids) == len({part_id[1] for part_id in remote_part_ids}) + assert len(remote_part_ids) == len({part_id.rank for part_id in remote_part_ids}) if isinstance(ary, Number): # NOTE: Assumes that the same number is passed on every rank @@ -785,7 +785,7 @@ def cross_rank_inter_volume_trace_pairs( rank = dcoll.mpi_communicator.Get_rank() - local_part_id = (self_volume_dd.domain_tag.tag, rank) + local_part_id = PartID(self_volume_dd.domain_tag.tag, rank) connected_part_ids = _connected_parts( dcoll, self_volume_tag=self_volume_dd.domain_tag.tag, @@ -794,12 +794,12 @@ def cross_rank_inter_volume_trace_pairs( remote_part_ids = [ part_id for part_id in connected_part_ids - if part_id[1] != rank] + if part_id.rank != rank] # This asserts that there is only one data exchange per rank, so that # there is no risk of mismatched data reaching the wrong recipient. # (Since we have only a single tag.) - assert len(remote_part_ids) == len({part_id[1] for part_id in remote_part_ids}) + assert len(remote_part_ids) == len({part_id.rank for part_id in remote_part_ids}) actx = get_container_context_recursively(self_ary) assert actx is not None From b2219d8bab9e33f2d7837c3a51f64711507447bf Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Mon, 4 Jul 2022 00:51:55 -0500 Subject: [PATCH 08/30] move PartID conversion setup stuff into _normalize_mesh_part_ids --- grudge/discretization.py | 98 ++++++++++++++++++---------------------- 1 file changed, 45 insertions(+), 53 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index b62b96c76..abb198e76 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -35,7 +35,7 @@ THE SOFTWARE. """ -from typing import Mapping, Optional, Union, Tuple, TYPE_CHECKING, Any +from typing import Sequence, Mapping, Optional, Union, Tuple, TYPE_CHECKING, Any from pytools import memoize_method, single_valued @@ -93,7 +93,45 @@ class PartID: # {{{ part ID normalization -def _normalize_mesh_part_ids(mesh, as_part_id): +def _normalize_mesh_part_ids( + mesh: Mesh, + volume_tags: Sequence[VolumeTag], + mpi_communicator: Optional["mpi4py.MPI.Intracomm"] = None): + """Convert a mesh's configuration-dependent "part ID" into a fixed type.""" + if VTAG_ALL not in volume_tags: + # Multi-volume + if mpi_communicator is not None: + # Accept PartID + def as_part_id(mesh_part_id): + if isinstance(mesh_part_id, PartID): + return mesh_part_id + else: + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") + else: + # Accept PartID or volume tag + def as_part_id(mesh_part_id): + if isinstance(mesh_part_id, PartID): + return mesh_part_id + elif mesh_part_id in volume_tags: + return PartID(mesh_part_id) + else: + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") + else: + # Single-volume + if mpi_communicator is not None: + # Accept PartID or rank + def as_part_id(mesh_part_id): + if isinstance(mesh_part_id, PartID): + return mesh_part_id + elif isinstance(mesh_part_id, int): + return PartID(VTAG_ALL, mesh_part_id) + else: + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") + else: + # Shouldn't be called + def as_part_id(mesh_part_id): + raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") + facial_adjacency_groups = mesh.facial_adjacency_groups new_facial_adjacency_groups = [] @@ -256,22 +294,8 @@ def __init__(self, array_context: ArrayContext, mesh = volume_discrs - if mpi_communicator is not None: - # Accept PartID or rank - def as_part_id(mesh_part_id): - if isinstance(mesh_part_id, PartID): - 
return mesh_part_id - elif isinstance(mesh_part_id, int): - return PartID(VTAG_ALL, mesh_part_id) - else: - raise TypeError( - f"Unable to convert {mesh_part_id} to PartID.") - else: - # Shouldn't be called - def as_part_id(mesh_part_id): - raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") - - mesh = _normalize_mesh_part_ids(mesh, as_part_id) + mesh = _normalize_mesh_part_ids( + mesh, [VTAG_ALL], mpi_communicator=mpi_communicator) discr_tag_to_group_factory = _normalize_discr_tag_to_group_factory( dim=mesh.dim, @@ -978,40 +1002,6 @@ def make_discretization_collection( mpi_communicator = getattr(array_context, "mpi_communicator", None) - if VTAG_ALL not in volumes.keys(): - # Multi-volume - if mpi_communicator is not None: - # Accept PartID - def as_part_id(mesh_part_id): - if isinstance(mesh_part_id, PartID): - return mesh_part_id - else: - raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") - else: - # Accept PartID or volume tag - def as_part_id(mesh_part_id): - if isinstance(mesh_part_id, PartID): - return mesh_part_id - elif mesh_part_id in volumes.keys(): - return PartID(mesh_part_id) - else: - raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") - else: - # Single-volume - if mpi_communicator is not None: - # Accept PartID or rank - def as_part_id(mesh_part_id): - if isinstance(mesh_part_id, PartID): - return mesh_part_id - elif isinstance(mesh_part_id, int): - return PartID(VTAG_ALL, mesh_part_id) - else: - raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") - else: - # Shouldn't be called - def as_part_id(mesh_part_id): - raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") - if any( isinstance(mesh_or_discr, Discretization) for mesh_or_discr in volumes.values()): @@ -1019,7 +1009,9 @@ def as_part_id(mesh_part_id): volume_discrs = { vtag: Discretization( - array_context, _normalize_mesh_part_ids(mesh, as_part_id), + array_context, + _normalize_mesh_part_ids( + mesh, volumes.keys(), mpi_communicator=mpi_communicator), discr_tag_to_group_factory[DISCR_TAG_BASE]) for vtag, mesh in volumes.items()} From a05c62633d5ddbb90f5c9c4f1f78580e434f5923 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Mon, 4 Jul 2022 10:26:21 -0500 Subject: [PATCH 09/30] reset requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d2383ba1d..2107e5aeb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ git+https://github.com/inducer/leap.git#egg=leap git+https://github.com/inducer/meshpy.git#egg=meshpy git+https://github.com/inducer/modepy.git#egg=modepy git+https://github.com/inducer/arraycontext.git#egg=arraycontext -git+https://github.com/majosm/meshmode.git@mpi-distribute#egg=meshmode +git+https://github.com/inducer/meshmode.git#egg=meshmode git+https://github.com/inducer/pyvisfile.git#egg=pyvisfile git+https://github.com/inducer/pymetis.git#egg=pymetis git+https://github.com/illinois-ceesd/logpyle.git#egg=logpyle From fb8a34b6812c2d649e9864dd3351ee1c5efc24bc Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Mon, 4 Jul 2022 12:30:51 -0500 Subject: [PATCH 10/30] accept general integral types as MPI ranks --- grudge/discretization.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index abb198e76..c05a59c35 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -98,6 +98,7 @@ def _normalize_mesh_part_ids( volume_tags: Sequence[VolumeTag], 
mpi_communicator: Optional["mpi4py.MPI.Intracomm"] = None): """Convert a mesh's configuration-dependent "part ID" into a fixed type.""" + from numbers import Integral if VTAG_ALL not in volume_tags: # Multi-volume if mpi_communicator is not None: @@ -123,8 +124,8 @@ def as_part_id(mesh_part_id): def as_part_id(mesh_part_id): if isinstance(mesh_part_id, PartID): return mesh_part_id - elif isinstance(mesh_part_id, int): - return PartID(VTAG_ALL, mesh_part_id) + elif isinstance(mesh_part_id, Integral): + return PartID(VTAG_ALL, int(mesh_part_id)) else: raise TypeError(f"Unable to convert {mesh_part_id} to PartID.") else: From dce83fbdea18e9967748bf321b401dd9f48947c4 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Tue, 9 Aug 2022 10:40:03 -0500 Subject: [PATCH 11/30] simplify partitioning in test --- test/test_grudge.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/test/test_grudge.py b/test/test_grudge.py index 949466bf5..819752098 100644 --- a/test/test_grudge.py +++ b/test/test_grudge.py @@ -1086,10 +1086,9 @@ def test_multi_volume(actx_factory): meg, = mesh.groups x = mesh.vertices[0, meg.vertex_indices] x_elem_avg = np.sum(x, axis=1)/x.shape[1] - volume_per_element = (x_elem_avg > 0).astype(np.int32) - - from meshmode.distributed import membership_list_to_map - volume_to_elements = membership_list_to_map(volume_per_element) + volume_to_elements = { + 0: np.where(x_elem_avg <= 0)[0], + 1: np.where(x_elem_avg > 0)[0]} from meshmode.mesh.processing import partition_mesh volume_to_mesh = partition_mesh(mesh, volume_to_elements) From 9590cdc13b6a1a8278d4c12b6e8ec357e67cdaa8 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Wed, 23 Mar 2022 15:03:50 -0500 Subject: [PATCH 12/30] redesign inter-volume trace pair functions --- grudge/trace_pair.py | 536 +++++++++++++++++++++++++++---------------- 1 file changed, 344 insertions(+), 192 deletions(-) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index d06fd13ea..158db551d 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -55,7 +55,7 @@ from warnings import warn -from typing import List, Hashable, Optional, Type, Any, Sequence +from typing import List, Hashable, Optional, Tuple, Type, Any, Sequence, Mapping from pytools.persistent_dict import KeyBuilder @@ -65,9 +65,9 @@ with_container_arithmetic, dataclass_array_container, get_container_context_recursively, - flatten, to_numpy, - unflatten, from_numpy, - flat_size_and_dtype, + get_container_context_recursively_opt, + to_numpy, + from_numpy, ArrayOrContainer ) @@ -76,7 +76,6 @@ from numbers import Number from pytools import memoize_on_first_arg -from pytools.obj_array import obj_array_vectorize from grudge.discretization import DiscretizationCollection, PartID from grudge.projection import project @@ -297,16 +296,22 @@ def local_interior_trace_pair( interior = project(dcoll, volume_dd, trace_dd, vec) - def get_opposite_trace(el): - if isinstance(el, Number): - return el + opposite_face_conn = dcoll.opposite_face_connection(trace_dd.domain_tag) + + def get_opposite_trace(ary): + if isinstance(ary, Number): + return ary else: - assert isinstance(trace_dd.domain_tag, BoundaryDomainTag) - return dcoll.opposite_face_connection(trace_dd.domain_tag)(el) + return opposite_face_conn(ary) - e = obj_array_vectorize(get_opposite_trace, interior) + from arraycontext import rec_map_array_container + from meshmode.dof_array import DOFArray + exterior = rec_map_array_container( + get_opposite_trace, + interior, + leaf_class=DOFArray) - return TracePair(trace_dd, 
interior=interior, exterior=e) + return TracePair(trace_dd, interior=interior, exterior=exterior) def interior_trace_pair(dcoll: DiscretizationCollection, vec) -> TracePair: @@ -364,57 +369,87 @@ def interior_trace_pairs(dcoll: DiscretizationCollection, vec, *, def local_inter_volume_trace_pairs( dcoll: DiscretizationCollection, - self_volume_dd: DOFDesc, self_ary: ArrayOrContainer, - other_volume_dd: DOFDesc, other_ary: ArrayOrContainer, - ) -> ArrayOrContainer: - if not isinstance(self_volume_dd.domain_tag, VolumeDomainTag): - raise ValueError("self_volume_dd must describe a volume") - if not isinstance(other_volume_dd.domain_tag, VolumeDomainTag): - raise ValueError("other_volume_dd must describe a volume") - if self_volume_dd.discretization_tag != DISCR_TAG_BASE: - raise TypeError( - f"expected a base-discretized self DOFDesc, got '{self_volume_dd}'") - if other_volume_dd.discretization_tag != DISCR_TAG_BASE: - raise TypeError( - f"expected a base-discretized other DOFDesc, got '{other_volume_dd}'") + pairwise_volume_data: Mapping[ + Tuple[DOFDesc, DOFDesc], + Tuple[ArrayOrContainer, ArrayOrContainer]] + ) -> Mapping[Tuple[DOFDesc, DOFDesc], TracePair]: + for vol_dd_pair in pairwise_volume_data.keys(): + for vol_dd in vol_dd_pair: + if not isinstance(vol_dd.domain_tag, VolumeDomainTag): + raise ValueError( + "pairwise_volume_data keys must describe volumes, " + f"got '{vol_dd}'") + if vol_dd.discretization_tag != DISCR_TAG_BASE: + raise ValueError( + "expected base-discretized DOFDesc in pairwise_volume_data, " + f"got '{vol_dd}'") rank = ( dcoll.mpi_communicator.Get_rank() if dcoll.mpi_communicator is not None else None) - self_part_id = PartID(self_volume_dd.domain_tag.tag, rank) - other_part_id = PartID(other_volume_dd.domain_tag.tag, rank) - - self_trace_dd = self_volume_dd.trace(BTAG_PARTITION(other_part_id)) - other_trace_dd = other_volume_dd.trace(BTAG_PARTITION(self_part_id)) - - # FIXME: In all likelihood, these traces will be reevaluated from - # the other side, which is hard to prevent given the interface we - # have. Lazy eval will hopefully collapse those redundant evaluations... 
- self_trace = project( - dcoll, self_volume_dd, self_trace_dd, self_ary) - other_trace = project( - dcoll, other_volume_dd, other_trace_dd, other_ary) + result: Mapping[Tuple[DOFDesc, DOFDesc], TracePair] = {} + + for vol_dd_pair, vol_data_pair in pairwise_volume_data.items(): + directional_vol_dd_pairs = [ + (vol_dd_pair[1], vol_dd_pair[0]), + (vol_dd_pair[0], vol_dd_pair[1])] + + trace_dd_pair = tuple( + self_vol_dd.trace( + BTAG_PARTITION( + PartID(other_vol_dd.domain_tag.tag, rank))) + for other_vol_dd, self_vol_dd in directional_vol_dd_pairs) + + # Pre-compute the projections out here to avoid doing it twice inside + # the loop below + trace_data = { + trace_dd: project(dcoll, vol_dd, trace_dd, vol_data) + for vol_dd, trace_dd, vol_data in zip( + vol_dd_pair, trace_dd_pair, vol_data_pair)} + + for other_vol_dd, self_vol_dd in directional_vol_dd_pairs: + self_part_id = PartID(self_vol_dd.domain_tag.tag, rank) + other_part_id = PartID(other_vol_dd.domain_tag.tag, rank) + + self_trace_dd = self_vol_dd.trace(BTAG_PARTITION(other_part_id)) + other_trace_dd = other_vol_dd.trace(BTAG_PARTITION(self_part_id)) + + self_trace_data = trace_data[self_trace_dd] + unswapped_other_trace_data = trace_data[other_trace_dd] + + other_to_self = dcoll._inter_part_connections[ + other_part_id, self_part_id] + + def get_opposite_trace(ary): + if isinstance(ary, Number): + return ary + else: + return other_to_self(ary) + + from arraycontext import rec_map_array_container + from meshmode.dof_array import DOFArray + other_trace_data = rec_map_array_container( + get_opposite_trace, + unswapped_other_trace_data, + leaf_class=DOFArray) + + result[other_vol_dd, self_vol_dd] = TracePair( + self_trace_dd, + interior=self_trace_data, + exterior=other_trace_data) - other_to_self = dcoll._inter_part_connections[other_part_id, self_part_id] - - def get_opposite_trace(el): - if isinstance(el, Number): - return el - else: - return other_to_self(el) - - return TracePair( - self_trace_dd, - interior=self_trace, - exterior=obj_array_vectorize(get_opposite_trace, other_trace)) + return result def inter_volume_trace_pairs(dcoll: DiscretizationCollection, - self_volume_dd: DOFDesc, self_ary: ArrayOrContainer, - other_volume_dd: DOFDesc, other_ary: ArrayOrContainer, - comm_tag: Hashable = None) -> List[ArrayOrContainer]: + pairwise_volume_data: Mapping[ + Tuple[DOFDesc, DOFDesc], + Tuple[ArrayOrContainer, ArrayOrContainer]], + comm_tag: Hashable = None) -> Mapping[ + Tuple[DOFDesc, DOFDesc], + List[TracePair]]: """ Note that :func:`local_inter_volume_trace_pairs` provides the rank-local contributions if those are needed in isolation. 
Similarly, @@ -423,13 +458,21 @@ def inter_volume_trace_pairs(dcoll: DiscretizationCollection, """ # TODO documentation - return ( - [local_inter_volume_trace_pairs(dcoll, - self_volume_dd, self_ary, other_volume_dd, other_ary)] - + cross_rank_inter_volume_trace_pairs(dcoll, - self_volume_dd, self_ary, other_volume_dd, other_ary, - comm_tag=comm_tag) - ) + result: Mapping[ + Tuple[DOFDesc, DOFDesc], + List[TracePair]] = {} + + local_tpairs = local_inter_volume_trace_pairs(dcoll, pairwise_volume_data) + cross_rank_tpairs = cross_rank_inter_volume_trace_pairs( + dcoll, pairwise_volume_data, comm_tag=comm_tag) + + for directional_vol_dd_pair, tpair in local_tpairs.items(): + result[directional_vol_dd_pair] = [tpair] + + for directional_vol_dd_pair, tpairs in cross_rank_tpairs.items(): + result.setdefault(directional_vol_dd_pair, []).append(tpairs) + + return result # }}} @@ -498,7 +541,7 @@ def __init__(self, local_part_id: PartID, remote_part_id: PartID, local_bdry_data: ArrayOrContainer, - send_data: ArrayOrContainer, + remote_bdry_data_template: ArrayOrContainer, comm_tag: Optional[Hashable] = None): comm = dcoll.mpi_communicator @@ -511,9 +554,14 @@ def __init__(self, self.array_context = actx self.local_part_id = local_part_id self.remote_part_id = remote_part_id - self.bdry_discr = dcoll.discr_from_dd( - BoundaryDomainTag(BTAG_PARTITION(remote_part_id))) + self.local_bdry_dd = DOFDesc( + BoundaryDomainTag( + BTAG_PARTITION(remote_part_id), + volume_tag=local_part_id.volume_tag), + DISCR_TAG_BASE) + self.bdry_discr = dcoll.discr_from_dd(self.local_bdry_dd) self.local_bdry_data = local_bdry_data + self.remote_bdry_data_template = remote_bdry_data_template self.comm_tag = self.base_comm_tag comm_tag = _sym_tag_to_num_tag(comm_tag) @@ -526,36 +574,73 @@ def __init__(self, # requests is complete, however it is not clear that this is documented # behavior. We hold on to the buffer (via the instance attribute) # as well, just in case. 
- self.send_data_np = to_numpy(flatten(send_data, actx), actx) - self.send_req = comm.Isend(self.send_data_np, - remote_rank, - tag=self.comm_tag) + self.send_reqs = [] + self.send_data = [] + + def send_single_array(key, local_subary): + if not isinstance(local_subary, Number): + local_subary_np = to_numpy(local_subary, actx) + self.send_reqs.append( + comm.Isend(local_subary_np, remote_rank, tag=self.comm_tag)) + self.send_data.append(local_subary_np) + return local_subary + + self.recv_reqs = [] + self.recv_data = {} + + def recv_single_array(key, remote_subary_template): + if not isinstance(remote_subary_template, Number): + remote_subary_np = np.empty( + remote_subary_template.shape, + remote_subary_template.dtype) + self.recv_reqs.append( + comm.Irecv(remote_subary_np, remote_rank, tag=self.comm_tag)) + self.recv_data[key] = remote_subary_np + return remote_subary_template - recv_size, recv_dtype = flat_size_and_dtype(local_bdry_data) - self.recv_data_np = np.empty(recv_size, recv_dtype) - self.recv_req = comm.Irecv(self.recv_data_np, remote_rank, tag=self.comm_tag) + from arraycontext.container.traversal import rec_keyed_map_array_container + rec_keyed_map_array_container(send_single_array, local_bdry_data) + rec_keyed_map_array_container(recv_single_array, remote_bdry_data_template) def finish(self): - # Wait for the nonblocking receive request to complete before + from mpi4py import MPI + + # Wait for the nonblocking receive requests to complete before # accessing the data - self.recv_req.Wait() + MPI.Request.waitall(self.recv_reqs) - recv_data_flat = from_numpy( - self.recv_data_np, self.array_context) - unswapped_remote_bdry_data = unflatten(self.local_bdry_data, - recv_data_flat, self.array_context) - bdry_conn = self.dcoll._inter_part_connections[ + def finish_single_array(key, remote_subary_template): + if isinstance(remote_subary_template, Number): + # NOTE: Assumes that the same number is passed on every rank + return remote_subary_template + else: + return from_numpy(self.recv_data[key], self.array_context) + + from arraycontext.container.traversal import rec_keyed_map_array_container + unswapped_remote_bdry_data = rec_keyed_map_array_container( + finish_single_array, self.remote_bdry_data_template) + + remote_to_local = self.dcoll._inter_part_connections[ self.remote_part_id, self.local_part_id] - remote_bdry_data = bdry_conn(unswapped_remote_bdry_data) - # Complete the nonblocking send request associated with communicating - # `self.local_bdry_data_np` - self.send_req.Wait() + def get_opposite_trace(ary): + if isinstance(ary, Number): + return ary + else: + return remote_to_local(ary) + + from arraycontext import rec_map_array_container + from meshmode.dof_array import DOFArray + remote_bdry_data = rec_map_array_container( + get_opposite_trace, + unswapped_remote_bdry_data, + leaf_class=DOFArray) + + # Complete the nonblocking send requests + MPI.Request.waitall(self.send_reqs) return TracePair( - DOFDesc( - BoundaryDomainTag(BTAG_PARTITION(self.remote_part_id)), - DISCR_TAG_BASE), + self.local_bdry_dd, interior=self.local_bdry_data, exterior=remote_bdry_data) @@ -572,64 +657,108 @@ def __init__(self, local_part_id: PartID, remote_part_id: PartID, local_bdry_data: ArrayOrContainer, - send_data: ArrayOrContainer, + remote_bdry_data_template: ArrayOrContainer, comm_tag: Optional[Hashable] = None) -> None: if comm_tag is None: raise ValueError("lazy communication requires 'comm_tag' to be supplied") + remote_rank = remote_part_id.rank + assert remote_rank is not None + 
self.dcoll = dcoll self.array_context = actx - self.bdry_discr = dcoll.discr_from_dd( - BoundaryDomainTag(BTAG_PARTITION(remote_part_id))) + self.local_bdry_dd = DOFDesc( + BoundaryDomainTag( + BTAG_PARTITION(remote_part_id), + volume_tag=local_part_id.volume_tag), + DISCR_TAG_BASE) + self.bdry_discr = dcoll.discr_from_dd(self.local_bdry_dd) self.local_part_id = local_part_id self.remote_part_id = remote_part_id - remote_rank = remote_part_id.rank - assert remote_rank is not None - - self.local_bdry_data = local_bdry_data - - from arraycontext.container.traversal import rec_keyed_map_array_container - - key_to_send_subary = {} - - def store_send_subary(key, send_subary): - key_to_send_subary[key] = send_subary - return send_subary - rec_keyed_map_array_container(store_send_subary, send_data) - from pytato import make_distributed_recv, staple_distributed_send - def communicate_single_array(key, local_bdry_subary): - ary_tag = (comm_tag, key) - return staple_distributed_send( - key_to_send_subary[key], dest_rank=remote_rank, comm_tag=ary_tag, - stapled_to=make_distributed_recv( - src_rank=remote_rank, comm_tag=ary_tag, - shape=local_bdry_subary.shape, - dtype=local_bdry_subary.dtype, - axes=local_bdry_subary.axes)) + # Staple the sends to a bunch of dummy arrays of zeros + def send_single_array(key, local_subary): + if isinstance(local_subary, Number): + return 0 + else: + ary_tag = (comm_tag, key) + return staple_distributed_send( + local_subary, dest_rank=remote_rank, comm_tag=ary_tag, + stapled_to=actx.zeros_like(local_subary)) + + def recv_single_array(key, remote_subary_template): + if isinstance(remote_subary_template, Number): + # NOTE: Assumes that the same number is passed on every rank + return remote_subary_template + else: + ary_tag = (comm_tag, key) + return make_distributed_recv( + src_rank=remote_rank, comm_tag=ary_tag, + shape=remote_subary_template.shape, + dtype=remote_subary_template.dtype, + axes=remote_subary_template.axes) - self.remote_data = rec_keyed_map_array_container( - communicate_single_array, self.local_bdry_data) + from arraycontext.container.traversal import rec_keyed_map_array_container + zeros_like_local_bdry_data = rec_keyed_map_array_container( + send_single_array, local_bdry_data) + unswapped_remote_bdry_data = rec_keyed_map_array_container( + recv_single_array, remote_bdry_data_template) + + # Sum up the dummy zeros + zero = actx.np.sum(zeros_like_local_bdry_data) + + # Add the dummy zeros and hope that the caller proceeds to actually + # use some of this data on every rank... 
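+        # (The extra data dependence on the stapled zeros keeps the
+        # distributed sends from being pruned out of the computation graph.)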
+ from arraycontext import rec_map_array_container + # This caused test_mpi_communication.py::test_func_comparison_mpi to fail + # for some reason +# self.local_bdry_data = rec_map_array_container( +# lambda x: x + zero, +# local_bdry_data) + self.local_bdry_data = local_bdry_data + self.unswapped_remote_bdry_data = rec_map_array_container( + lambda x: x + zero, + unswapped_remote_bdry_data) def finish(self): - bdry_conn = self.dcoll._inter_part_connections[ + remote_to_local = self.dcoll._inter_part_connections[ self.remote_part_id, self.local_part_id] + def get_opposite_trace(ary): + if isinstance(ary, Number): + return ary + else: + return remote_to_local(ary) + + from arraycontext import rec_map_array_container + from meshmode.dof_array import DOFArray + remote_bdry_data = rec_map_array_container( + get_opposite_trace, + self.unswapped_remote_bdry_data, + leaf_class=DOFArray) + return TracePair( - DOFDesc( - BoundaryDomainTag(BTAG_PARTITION(self.remote_part_id)), - DISCR_TAG_BASE), + self.local_bdry_dd, interior=self.local_bdry_data, - exterior=bdry_conn(self.remote_data)) + exterior=remote_bdry_data) # }}} # {{{ cross_rank_trace_pairs +def _replace_dof_arrays(array_container, dof_array): + from arraycontext import rec_map_array_container + from meshmode.dof_array import DOFArray + return rec_map_array_container( + lambda x: dof_array if isinstance(x, DOFArray) else x, + array_container, + leaf_class=DOFArray) + + def cross_rank_trace_pairs( dcoll: DiscretizationCollection, ary: ArrayOrContainer, tag: Hashable = None, @@ -706,42 +835,37 @@ def cross_rank_trace_pairs( # (Since we have only a single tag.) assert len(remote_part_ids) == len({part_id.rank for part_id in remote_part_ids}) - if isinstance(ary, Number): - # NOTE: Assumes that the same number is passed on every rank - return [ - TracePair( - DOFDesc( - BoundaryDomainTag(BTAG_PARTITION(remote_part_id)), - DISCR_TAG_BASE), - interior=ary, exterior=ary) - for remote_part_id in remote_part_ids] - actx = get_container_context_recursively(ary) assert actx is not None from grudge.array_context import MPIPytatoArrayContextBase if isinstance(actx, MPIPytatoArrayContextBase): - rbc = _RankBoundaryCommunicationLazy + rbc_class = _RankBoundaryCommunicationLazy else: - rbc = _RankBoundaryCommunicationEager + rbc_class = _RankBoundaryCommunicationEager - def start_comm(remote_part_id): - bdtag = BoundaryDomainTag(BTAG_PARTITION(remote_part_id)) + rank_bdry_communicators = [] - local_bdry_data = project(dcoll, volume_dd, bdtag, ary) + for remote_part_id in remote_part_ids: + bdry_dd = volume_dd.trace(BTAG_PARTITION(remote_part_id)) - return rbc(actx, dcoll, - local_part_id=local_part_id, - remote_part_id=remote_part_id, - local_bdry_data=local_bdry_data, - send_data=local_bdry_data, - comm_tag=comm_tag) + local_bdry_data = project(dcoll, volume_dd, bdry_dd, ary) - rank_bdry_communcators = [ - start_comm(remote_part_id) - for remote_part_id in remote_part_ids] - return [rc.finish() for rc in rank_bdry_communcators] + remote_bdry_data_template = _replace_dof_arrays( + local_bdry_data, + dcoll._inter_part_connections[ + remote_part_id, local_part_id].from_discr.zeros(actx)) + + rank_bdry_communicators.append( + rbc_class(actx, dcoll, + local_part_id=local_part_id, + remote_part_id=remote_part_id, + local_bdry_data=local_bdry_data, + remote_bdry_data_template=remote_bdry_data_template, + comm_tag=comm_tag)) + + return [rbc.finish() for rbc in rank_bdry_communicators] # }}} @@ -750,10 +874,13 @@ def start_comm(remote_part_id): def 
cross_rank_inter_volume_trace_pairs( dcoll: DiscretizationCollection, - self_volume_dd: DOFDesc, self_ary: ArrayOrContainer, - other_volume_dd: DOFDesc, other_ary: ArrayOrContainer, + pairwise_volume_data: Mapping[ + Tuple[DOFDesc, DOFDesc], + Tuple[ArrayOrContainer, ArrayOrContainer]], *, comm_tag: Hashable = None, - ) -> List[TracePair]: + ) -> Mapping[ + Tuple[DOFDesc, DOFDesc], + List[TracePair]]: # FIXME: Should this interface take in boundary data instead? # TODO: Docs r"""Get a :class:`list` of *ary* trace pairs for each partition boundary. @@ -767,16 +894,16 @@ def cross_rank_inter_volume_trace_pairs( """ # {{{ process arguments - if not isinstance(self_volume_dd.domain_tag, VolumeDomainTag): - raise ValueError("self_volume_dd must describe a volume") - if not isinstance(other_volume_dd.domain_tag, VolumeDomainTag): - raise ValueError("other_volume_dd must describe a volume") - if self_volume_dd.discretization_tag != DISCR_TAG_BASE: - raise TypeError( - f"expected a base-discretized self DOFDesc, got '{self_volume_dd}'") - if other_volume_dd.discretization_tag != DISCR_TAG_BASE: - raise TypeError( - f"expected a base-discretized other DOFDesc, got '{other_volume_dd}'") + for vol_dd_pair in pairwise_volume_data.keys(): + for vol_dd in vol_dd_pair: + if not isinstance(vol_dd.domain_tag, VolumeDomainTag): + raise ValueError( + "pairwise_volume_data keys must describe volumes, " + f"got '{vol_dd}'") + if vol_dd.discretization_tag != DISCR_TAG_BASE: + raise ValueError( + "expected base-discretized DOFDesc in pairwise_volume_data, " + f"got '{vol_dd}'") # }}} @@ -785,50 +912,75 @@ def cross_rank_inter_volume_trace_pairs( rank = dcoll.mpi_communicator.Get_rank() - local_part_id = PartID(self_volume_dd.domain_tag.tag, rank) - - connected_part_ids = _connected_parts( - dcoll, self_volume_tag=self_volume_dd.domain_tag.tag, - other_volume_tag=other_volume_dd.domain_tag.tag) - - remote_part_ids = [ - part_id - for part_id in connected_part_ids - if part_id.rank != rank] - - # This asserts that there is only one data exchange per rank, so that - # there is no risk of mismatched data reaching the wrong recipient. - # (Since we have only a single tag.) 
- assert len(remote_part_ids) == len({part_id.rank for part_id in remote_part_ids}) - - actx = get_container_context_recursively(self_ary) + for vol_data_pair in pairwise_volume_data.values(): + for vol_data in vol_data_pair: + actx = get_container_context_recursively_opt(vol_data) + if actx is not None: + break + if actx is not None: + break assert actx is not None from grudge.array_context import MPIPytatoArrayContextBase if isinstance(actx, MPIPytatoArrayContextBase): - rbc = _RankBoundaryCommunicationLazy + rbc_class = _RankBoundaryCommunicationLazy else: - rbc = _RankBoundaryCommunicationEager - - def start_comm(remote_part_id): - bdtag = BoundaryDomainTag(BTAG_PARTITION(remote_part_id)) - - local_bdry_data = project(dcoll, self_volume_dd, bdtag, self_ary) - send_data = project(dcoll, other_volume_dd, - BTAG_PARTITION(local_part_id), other_ary) - - return rbc(actx, dcoll, - local_part_id=local_part_id, - remote_part_id=remote_part_id, - local_bdry_data=local_bdry_data, - send_data=send_data, - comm_tag=comm_tag) + rbc_class = _RankBoundaryCommunicationEager - rank_bdry_communcators = [ - start_comm(remote_part_id) - for remote_part_id in remote_part_ids] - return [rc.finish() for rc in rank_bdry_communcators] + def get_remote_connected_parts(local_vol_dd, remote_vol_dd): + connected_part_ids = _connected_parts( + dcoll, self_volume_tag=local_vol_dd.domain_tag.tag, + other_volume_tag=remote_vol_dd.domain_tag.tag) + return [ + part_id + for part_id in connected_part_ids + if part_id.rank != rank] + + rank_bdry_communicators = {} + + for vol_dd_pair, vol_data_pair in pairwise_volume_data.items(): + directional_volume_data = { + (vol_dd_pair[0], vol_dd_pair[1]): (vol_data_pair[0], vol_data_pair[1]), + (vol_dd_pair[1], vol_dd_pair[0]): (vol_data_pair[1], vol_data_pair[0])} + + for dd_pair, data_pair in directional_volume_data.items(): + other_vol_dd, self_vol_dd = dd_pair + other_vol_data, self_vol_data = data_pair + + self_part_id = PartID(self_vol_dd.domain_tag.tag, rank) + other_part_ids = get_remote_connected_parts(self_vol_dd, other_vol_dd) + + rbcs = [] + + for other_part_id in other_part_ids: + self_bdry_dd = self_vol_dd.trace(BTAG_PARTITION(other_part_id)) + self_bdry_data = project( + dcoll, self_vol_dd, self_bdry_dd, self_vol_data) + + other_bdry_template_dd = other_vol_dd.trace( + BTAG_PARTITION(self_part_id)) + other_bdry_container_template = project( + dcoll, other_vol_dd, other_bdry_template_dd, + other_vol_data) + other_bdry_data_template = _replace_dof_arrays( + other_bdry_container_template, + dcoll._inter_part_connections[ + other_part_id, self_part_id].from_discr.zeros(actx)) + + rbcs.append( + rbc_class(actx, dcoll, + local_part_id=self_part_id, + remote_part_id=other_part_id, + local_bdry_data=self_bdry_data, + remote_bdry_data_template=other_bdry_data_template, + comm_tag=comm_tag)) + + rank_bdry_communicators[other_vol_dd, self_vol_dd] = rbcs + + return { + directional_vol_dd_pair: [rbc.finish() for rbc in rbcs] + for directional_vol_dd_pair, rbcs in rank_bdry_communicators.items()} # }}} From 9e0a95f423e4df80890d7d18dfba627355a83b27 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 25 Mar 2022 08:59:34 -0500 Subject: [PATCH 13/30] reapply zero addition to local_bdry_data test failure was caused by bug in test (that has since been fixed) --- grudge/trace_pair.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index 158db551d..e5caa77a9 100644 --- a/grudge/trace_pair.py +++ 
b/grudge/trace_pair.py @@ -713,12 +713,9 @@ def recv_single_array(key, remote_subary_template): # Add the dummy zeros and hope that the caller proceeds to actually # use some of this data on every rank... from arraycontext import rec_map_array_container - # This caused test_mpi_communication.py::test_func_comparison_mpi to fail - # for some reason -# self.local_bdry_data = rec_map_array_container( -# lambda x: x + zero, -# local_bdry_data) - self.local_bdry_data = local_bdry_data + self.local_bdry_data = rec_map_array_container( + lambda x: x + zero, + local_bdry_data) self.unswapped_remote_bdry_data = rec_map_array_container( lambda x: x + zero, unswapped_remote_bdry_data) From ef4a17cebe7dbe58df0cb323fd428e0364cd3c7e Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 25 Mar 2022 11:49:51 -0500 Subject: [PATCH 14/30] handle all-Number cases --- grudge/trace_pair.py | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index e5caa77a9..76b4c6876 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -833,7 +833,14 @@ def cross_rank_trace_pairs( assert len(remote_part_ids) == len({part_id.rank for part_id in remote_part_ids}) actx = get_container_context_recursively(ary) - assert actx is not None + + if actx is None: + # NOTE: Assumes that the same number is passed on every rank + return [ + TracePair( + volume_dd.trace(BTAG_PARTITION(remote_part_id)), + interior=ary, exterior=ary) + for remote_part_id in remote_part_ids] from grudge.array_context import MPIPytatoArrayContextBase @@ -916,14 +923,6 @@ def cross_rank_inter_volume_trace_pairs( break if actx is not None: break - assert actx is not None - - from grudge.array_context import MPIPytatoArrayContextBase - - if isinstance(actx, MPIPytatoArrayContextBase): - rbc_class = _RankBoundaryCommunicationLazy - else: - rbc_class = _RankBoundaryCommunicationEager def get_remote_connected_parts(local_vol_dd, remote_vol_dd): connected_part_ids = _connected_parts( @@ -934,6 +933,26 @@ def get_remote_connected_parts(local_vol_dd, remote_vol_dd): for part_id in connected_part_ids if part_id.rank != rank] + if actx is None: + # NOTE: Assumes that the same number is passed on every rank for a + # given volume + return { + (remote_vol_dd, local_vol_dd): [ + TracePair( + local_vol_dd.trace(BTAG_PARTITION(remote_part_id)), + interior=local_vol_ary, exterior=remote_vol_ary) + for remote_part_id in get_remote_connected_parts( + local_vol_dd, remote_vol_dd)] + for (remote_vol_dd, local_vol_dd), (remote_vol_ary, local_vol_ary) + in pairwise_volume_data.items()} + + from grudge.array_context import MPIPytatoArrayContextBase + + if isinstance(actx, MPIPytatoArrayContextBase): + rbc_class = _RankBoundaryCommunicationLazy + else: + rbc_class = _RankBoundaryCommunicationEager + rank_bdry_communicators = {} for vol_dd_pair, vol_data_pair in pairwise_volume_data.items(): From 4cf60fa671a068a3522a422b5c798c81d7ef900f Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 1 Apr 2022 10:52:43 -0500 Subject: [PATCH 15/30] cosmetic change --- grudge/discretization.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index c05a59c35..e71d1bfb5 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -859,13 +859,14 @@ def get_part_bdry_restriction(self_part_id, other_part_id): part_id = PartID(vtag, rank) connected_part_ids = get_connected_parts(volume_discr.mesh) for 
connected_part_id in connected_part_ids: - bdry_restr = get_part_bdry_restriction(part_id, connected_part_id) + bdry_restr = get_part_bdry_restriction( + self_part_id=part_id, other_part_id=connected_part_id) if connected_part_id.rank == rank: # {{{ rank-local interface between multiple volumes - rev_bdry_restr = get_part_bdry_restriction( - connected_part_id, part_id) + connected_bdry_restr = get_part_bdry_restriction( + self_part_id=connected_part_id, other_part_id=part_id) from meshmode.discretization.connection import \ make_partition_connection @@ -873,9 +874,9 @@ def get_part_bdry_restriction(self_part_id, other_part_id): make_partition_connection( array_context, local_bdry_conn=bdry_restr, - remote_bdry_discr=rev_bdry_restr.to_discr, + remote_bdry_discr=connected_bdry_restr.to_discr, remote_group_infos=make_remote_group_infos( - array_context, connected_part_id, rev_bdry_restr)) + array_context, connected_part_id, connected_bdry_restr)) # }}} else: From 041cb92ea784db8766edb4e84d4e0bd9ffd3479f Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 1 Apr 2022 10:52:52 -0500 Subject: [PATCH 16/30] fix bug --- grudge/discretization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grudge/discretization.py b/grudge/discretization.py index e71d1bfb5..0a9703e3c 100644 --- a/grudge/discretization.py +++ b/grudge/discretization.py @@ -876,7 +876,7 @@ def get_part_bdry_restriction(self_part_id, other_part_id): local_bdry_conn=bdry_restr, remote_bdry_discr=connected_bdry_restr.to_discr, remote_group_infos=make_remote_group_infos( - array_context, connected_part_id, connected_bdry_restr)) + array_context, part_id, connected_bdry_restr)) # }}} else: From cb3ebe10c796f37057d0f3fb9a935aa1a6028777 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 1 Apr 2022 10:53:31 -0500 Subject: [PATCH 17/30] tweak as_dofdesc normalization --- grudge/dof_desc.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/grudge/dof_desc.py b/grudge/dof_desc.py index 46d5553a1..cf285a30e 100644 --- a/grudge/dof_desc.py +++ b/grudge/dof_desc.py @@ -381,16 +381,16 @@ def _normalize_domain_and_discr_tag( if _contextual_volume_tag is None: _contextual_volume_tag = VTAG_ALL - if domain in [DTAG_SCALAR, "scalar"]: + if domain == "scalar": domain = DTAG_SCALAR - elif isinstance(domain, (BoundaryDomainTag, VolumeDomainTag)): + elif isinstance(domain, (ScalarDomainTag, BoundaryDomainTag, VolumeDomainTag)): pass - elif domain == "vol": + elif domain in [VTAG_ALL, "vol"]: domain = DTAG_VOLUME_ALL elif domain in [FACE_RESTR_ALL, "all_faces"]: - domain = BoundaryDomainTag(FACE_RESTR_ALL) + domain = BoundaryDomainTag(FACE_RESTR_ALL, _contextual_volume_tag) elif domain in [FACE_RESTR_INTERIOR, "int_faces"]: - domain = BoundaryDomainTag(FACE_RESTR_INTERIOR) + domain = BoundaryDomainTag(FACE_RESTR_INTERIOR, _contextual_volume_tag) elif isinstance(domain, BTAG_PARTITION): domain = BoundaryDomainTag(domain, _contextual_volume_tag) elif domain in [BTAG_ALL, BTAG_REALLY_ALL, BTAG_NONE]: From fa17cd14f4a9ee8e757b35987b521cc35f2c34a9 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 1 Apr 2022 10:56:23 -0500 Subject: [PATCH 18/30] fix bugs --- grudge/trace_pair.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index 76b4c6876..e84ce2c87 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -64,7 +64,6 @@ ArrayContext, with_container_arithmetic, dataclass_array_container, - get_container_context_recursively, 
get_container_context_recursively_opt, to_numpy, from_numpy, @@ -261,7 +260,7 @@ def bv_trace_pair( DeprecationWarning, stacklevel=2) dd = dof_desc.as_dofdesc(dd) return bdry_trace_pair( - dcoll, dd, project(dcoll, "vol", dd, interior), exterior) + dcoll, dd, project(dcoll, dd.domain_tag.volume_tag, dd, interior), exterior) # }}} @@ -470,7 +469,7 @@ def inter_volume_trace_pairs(dcoll: DiscretizationCollection, result[directional_vol_dd_pair] = [tpair] for directional_vol_dd_pair, tpairs in cross_rank_tpairs.items(): - result.setdefault(directional_vol_dd_pair, []).append(tpairs) + result.setdefault(directional_vol_dd_pair, []).extend(tpairs) return result @@ -832,7 +831,7 @@ def cross_rank_trace_pairs( # (Since we have only a single tag.) assert len(remote_part_ids) == len({part_id.rank for part_id in remote_part_ids}) - actx = get_container_context_recursively(ary) + actx = get_container_context_recursively_opt(ary) if actx is None: # NOTE: Assumes that the same number is passed on every rank From 5aa6c1dd1d3d5712a8a14b24280e0cf4d1b60498 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Mon, 18 Apr 2022 11:44:43 -0500 Subject: [PATCH 19/30] don't try to create trace pair if there's no adjacency --- grudge/trace_pair.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index e84ce2c87..d3a5c42c2 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -391,6 +391,12 @@ def local_inter_volume_trace_pairs( result: Mapping[Tuple[DOFDesc, DOFDesc], TracePair] = {} for vol_dd_pair, vol_data_pair in pairwise_volume_data.items(): + from meshmode.mesh import mesh_has_boundary + if not mesh_has_boundary( + dcoll.discr_from_dd(vol_dd_pair[0]).mesh, + BTAG_PARTITION(PartID(vol_dd_pair[1].domain_tag.tag, rank))): + continue + directional_vol_dd_pairs = [ (vol_dd_pair[1], vol_dd_pair[0]), (vol_dd_pair[0], vol_dd_pair[1])] From e34b30dee6d9b2f25c1a8f1c28b5982ee5fa4ef3 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Mon, 2 May 2022 10:14:07 -0500 Subject: [PATCH 20/30] forget about heterogeneous inter-volume trace pairs for now --- grudge/trace_pair.py | 54 ++++++++++++++++++++++---------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index d3a5c42c2..fcc590744 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -682,48 +682,48 @@ def __init__(self, self.local_part_id = local_part_id self.remote_part_id = remote_part_id - from pytato import make_distributed_recv, staple_distributed_send + from pytato import ( + make_distributed_recv, + make_distributed_send, + DistributedSendRefHolder) + + # TODO: This currently assumes that local_bdry_data and + # remote_bdry_data_template have the same structure. This is not true + # in general. 
Find a way to staple the sends appropriately when the number + # of recvs is not equal to the number of sends + assert type(local_bdry_data) == type(remote_bdry_data_template) + + sends = {} - # Staple the sends to a bunch of dummy arrays of zeros def send_single_array(key, local_subary): if isinstance(local_subary, Number): - return 0 + return else: ary_tag = (comm_tag, key) - return staple_distributed_send( - local_subary, dest_rank=remote_rank, comm_tag=ary_tag, - stapled_to=actx.zeros_like(local_subary)) + sends[key] = make_distributed_send( + local_subary, dest_rank=remote_rank, comm_tag=ary_tag) def recv_single_array(key, remote_subary_template): if isinstance(remote_subary_template, Number): # NOTE: Assumes that the same number is passed on every rank - return remote_subary_template + return Number else: ary_tag = (comm_tag, key) - return make_distributed_recv( - src_rank=remote_rank, comm_tag=ary_tag, - shape=remote_subary_template.shape, - dtype=remote_subary_template.dtype, - axes=remote_subary_template.axes) + return DistributedSendRefHolder( + sends[key], + make_distributed_recv( + src_rank=remote_rank, comm_tag=ary_tag, + shape=remote_subary_template.shape, + dtype=remote_subary_template.dtype, + axes=remote_subary_template.axes)) from arraycontext.container.traversal import rec_keyed_map_array_container - zeros_like_local_bdry_data = rec_keyed_map_array_container( - send_single_array, local_bdry_data) - unswapped_remote_bdry_data = rec_keyed_map_array_container( - recv_single_array, remote_bdry_data_template) - # Sum up the dummy zeros - zero = actx.np.sum(zeros_like_local_bdry_data) + rec_keyed_map_array_container(send_single_array, local_bdry_data) + self.local_bdry_data = local_bdry_data - # Add the dummy zeros and hope that the caller proceeds to actually - # use some of this data on every rank... - from arraycontext import rec_map_array_container - self.local_bdry_data = rec_map_array_container( - lambda x: x + zero, - local_bdry_data) - self.unswapped_remote_bdry_data = rec_map_array_container( - lambda x: x + zero, - unswapped_remote_bdry_data) + self.unswapped_remote_bdry_data = rec_keyed_map_array_container( + recv_single_array, remote_bdry_data_template) def finish(self): remote_to_local = self.dcoll._inter_part_connections[ From fb44700749ffd803a4138c7995921a11d9c00654 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Wed, 4 May 2022 15:39:10 -0500 Subject: [PATCH 21/30] add FIXME --- grudge/trace_pair.py | 1 + 1 file changed, 1 insertion(+) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index fcc590744..1ab14f511 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -691,6 +691,7 @@ def __init__(self, # remote_bdry_data_template have the same structure. This is not true # in general. 
Find a way to staple the sends appropriately when the number # of recvs is not equal to the number of sends + # FIXME: Overly restrictive (just needs to be the same structure) assert type(local_bdry_data) == type(remote_bdry_data_template) sends = {} From c3bb93c9e4acf53a278137e95545026e2c6a98b3 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Wed, 4 May 2022 15:39:22 -0500 Subject: [PATCH 22/30] fix bug --- grudge/trace_pair.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index 1ab14f511..6fef5ab10 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -707,7 +707,7 @@ def send_single_array(key, local_subary): def recv_single_array(key, remote_subary_template): if isinstance(remote_subary_template, Number): # NOTE: Assumes that the same number is passed on every rank - return Number + return remote_subary_template else: ary_tag = (comm_tag, key) return DistributedSendRefHolder( From 7ef21bac1c7a3360e317b10b6a98ec7b896491a1 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Sun, 3 Jul 2022 00:41:46 -0500 Subject: [PATCH 23/30] ignore flake8-bugbear error --- grudge/trace_pair.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index 6fef5ab10..2e6742e2d 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -431,7 +431,7 @@ def get_opposite_trace(ary): if isinstance(ary, Number): return ary else: - return other_to_self(ary) + return other_to_self(ary) # noqa: B023 from arraycontext import rec_map_array_container from meshmode.dof_array import DOFArray From 12a64438282e79ad642400c4c23b759da34ca881 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Tue, 9 Aug 2022 10:39:16 -0500 Subject: [PATCH 24/30] fix bug --- grudge/trace_pair.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py index 2e6742e2d..eee43b9f8 100644 --- a/grudge/trace_pair.py +++ b/grudge/trace_pair.py @@ -918,7 +918,7 @@ def cross_rank_inter_volume_trace_pairs( # }}} if dcoll.mpi_communicator is None: - return [] + return {} rank = dcoll.mpi_communicator.Get_rank() From ecfccfe4effc4505627bafd44f33c34b93a70885 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 1 Apr 2022 10:57:01 -0500 Subject: [PATCH 25/30] add volume_dd to make_visualizer --- grudge/shortcuts.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/grudge/shortcuts.py b/grudge/shortcuts.py index 0aca64a58..e6e62cc55 100644 --- a/grudge/shortcuts.py +++ b/grudge/shortcuts.py @@ -20,6 +20,8 @@ THE SOFTWARE. 
""" +from grudge.dof_desc import DD_VOLUME_ALL + from pytools import memoize_in @@ -76,11 +78,14 @@ def set_up_rk4(field_var_name, dt, fields, rhs, t_start=0.0): return dt_stepper -def make_visualizer(dcoll, vis_order=None, **kwargs): +def make_visualizer(dcoll, vis_order=None, volume_dd=None, **kwargs): from meshmode.discretization.visualization import make_visualizer + if volume_dd is None: + volume_dd = DD_VOLUME_ALL + return make_visualizer( dcoll._setup_actx, - dcoll.discr_from_dd("vol"), vis_order, **kwargs) + dcoll.discr_from_dd(volume_dd), vis_order, **kwargs) def make_boundary_visualizer(dcoll, vis_order=None, **kwargs): From 35c2f11129404cecd6fa8a2116f52944f12f0627 Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 1 Apr 2022 10:57:24 -0500 Subject: [PATCH 26/30] implement multi-volume support in op --- grudge/eager.py | 17 +++---- grudge/op.py | 124 +++++++++++++++++++++++++++++++++++------------- 2 files changed, 99 insertions(+), 42 deletions(-) diff --git a/grudge/eager.py b/grudge/eager.py index 400fee355..2175592d4 100644 --- a/grudge/eager.py +++ b/grudge/eager.py @@ -47,14 +47,14 @@ def __init__(self, *args, **kwargs): def project(self, src, tgt, vec): return op.project(self, src, tgt, vec) - def grad(self, vec): - return op.local_grad(self, vec) + def grad(self, *args): + return op.local_grad(self, *args) - def d_dx(self, xyz_axis, vec): - return op.local_d_dx(self, xyz_axis, vec) + def d_dx(self, xyz_axis, *args): + return op.local_d_dx(self, xyz_axis, *args) - def div(self, vecs): - return op.local_div(self, vecs) + def div(self, *args): + return op.local_div(self, *args) def weak_grad(self, *args): return op.weak_local_grad(self, *args) @@ -68,8 +68,8 @@ def weak_div(self, *args): def mass(self, *args): return op.mass(self, *args) - def inverse_mass(self, vec): - return op.inverse_mass(self, vec) + def inverse_mass(self, *args): + return op.inverse_mass(self, *args) def face_mass(self, *args): return op.face_mass(self, *args) @@ -89,5 +89,6 @@ def nodal_max(self, dd, vec): interior_trace_pair = op.interior_trace_pair cross_rank_trace_pairs = op.cross_rank_trace_pairs +inter_volume_trace_pairs = op.inter_volume_trace_pairs # vim: foldmethod=marker diff --git a/grudge/op.py b/grudge/op.py index 967a66ac3..862b201dd 100644 --- a/grudge/op.py +++ b/grudge/op.py @@ -81,6 +81,7 @@ DiscretizationFaceAxisTag) from grudge.discretization import DiscretizationCollection +from grudge.dof_desc import as_dofdesc from pytools import keyed_memoize_in from pytools.obj_array import make_obj_array @@ -88,7 +89,10 @@ import numpy as np import grudge.dof_desc as dof_desc -from grudge.dof_desc import DD_VOLUME_ALL, FACE_RESTR_ALL +from grudge.dof_desc import ( + DD_VOLUME_ALL, FACE_RESTR_ALL, DISCR_TAG_BASE, + DOFDesc, VolumeDomainTag +) from grudge.interpolation import interp from grudge.projection import project @@ -249,14 +253,14 @@ def get_ref_derivative_mats(grp): def _strong_scalar_grad(dcoll, dd_in, vec): - assert dd_in == dof_desc.as_dofdesc(DD_VOLUME_ALL) + assert isinstance(dd_in.domain_tag, VolumeDomainTag) from grudge.geometry import inverse_surface_metric_derivative_mat - discr = dcoll.discr_from_dd(DD_VOLUME_ALL) + discr = dcoll.discr_from_dd(dd_in) actx = vec.array_context - inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, + inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd_in, _use_geoderiv_connection=actx.supports_nonscalar_broadcasting) return _gradient_kernel(actx, discr, discr, _reference_derivative_matrices, 
inverse_jac_mat, vec, @@ -264,7 +268,7 @@ def _strong_scalar_grad(dcoll, dd_in, vec): def local_grad( - dcoll: DiscretizationCollection, vec, *, nested=False) -> ArrayOrContainer: + dcoll: DiscretizationCollection, *args, nested=False) -> ArrayOrContainer: r"""Return the element-local gradient of a function :math:`f` represented by *vec*: @@ -273,15 +277,26 @@ def local_grad( \nabla|_E f = \left( \partial_x|_E f, \partial_y|_E f, \partial_z|_E f \right) + May be called with ``(vec)`` or ``(dd_in, vec)``. + :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an :class:`~arraycontext.ArrayContainer` of them. + :arg dd_in: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one. + Defaults to the base volume discretization if not provided. :arg nested: return nested object arrays instead of a single multidimensional array if *vec* is non-scalar. :returns: an object array (possibly nested) of :class:`~meshmode.dof_array.DOFArray`\ s or :class:`~arraycontext.ArrayContainer` of object arrays. """ - dd_in = DD_VOLUME_ALL + if len(args) == 1: + vec, = args + dd_in = DD_VOLUME_ALL + elif len(args) == 2: + dd_in, vec = args + else: + raise TypeError("invalid number of arguments") + from grudge.tools import rec_map_subarrays return rec_map_subarrays( partial(_strong_scalar_grad, dcoll, dd_in), @@ -290,7 +305,7 @@ def local_grad( def local_d_dx( - dcoll: DiscretizationCollection, xyz_axis, vec) -> ArrayOrContainer: + dcoll: DiscretizationCollection, xyz_axis, *args) -> ArrayOrContainer: r"""Return the element-local derivative along axis *xyz_axis* of a function :math:`f` represented by *vec*: @@ -298,22 +313,34 @@ def local_d_dx( \frac{\partial f}{\partial \lbrace x,y,z\rbrace}\Big|_E + May be called with ``(vec)`` or ``(dd, vec)``. + :arg xyz_axis: an integer indicating the axis along which the derivative is taken. + :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one. + Defaults to the base volume discretization if not provided. :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an :class:`~arraycontext.ArrayContainer` of them. :returns: a :class:`~meshmode.dof_array.DOFArray` or an :class:`~arraycontext.ArrayContainer` of them. 
""" + if len(args) == 1: + vec, = args + dd = DD_VOLUME_ALL + elif len(args) == 2: + dd, vec = args + else: + raise TypeError("invalid number of arguments") + if not isinstance(vec, DOFArray): - return map_array_container(partial(local_d_dx, dcoll, xyz_axis), vec) + return map_array_container(partial(local_d_dx, dcoll, xyz_axis, dd), vec) - discr = dcoll.discr_from_dd(DD_VOLUME_ALL) + discr = dcoll.discr_from_dd(dd) actx = vec.array_context from grudge.geometry import inverse_surface_metric_derivative_mat - inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, - _use_geoderiv_connection=actx.supports_nonscalar_broadcasting) + inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd, + _use_geoderiv_connection=actx.supports_nonscalar_broadcasting) return _single_axis_derivative_kernel( actx, discr, discr, @@ -321,7 +348,7 @@ def local_d_dx( metric_in_matvec=False) -def local_div(dcoll: DiscretizationCollection, vecs) -> ArrayOrContainer: +def local_div(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer: r"""Return the element-local divergence of the vector function :math:`\mathbf{f}` represented by *vecs*: @@ -329,6 +356,10 @@ def local_div(dcoll: DiscretizationCollection, vecs) -> ArrayOrContainer: \nabla|_E \cdot \mathbf{f} = \sum_{i=1}^d \partial_{x_i}|_E \mathbf{f}_i + May be called with ``(vec)`` or ``(dd, vec)``. + + :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one. + Defaults to the base volume discretization if not provided. :arg vecs: an object array of :class:`~meshmode.dof_array.DOFArray`\s or an :class:`~arraycontext.ArrayContainer` object @@ -337,13 +368,21 @@ def local_div(dcoll: DiscretizationCollection, vecs) -> ArrayOrContainer: :returns: a :class:`~meshmode.dof_array.DOFArray` or an :class:`~arraycontext.ArrayContainer` of them. """ + if len(args) == 1: + vec, = args + dd = DD_VOLUME_ALL + elif len(args) == 2: + dd, vec = args + else: + raise TypeError("invalid number of arguments") + from grudge.tools import rec_map_subarrays return rec_map_subarrays( lambda vec: sum( - local_d_dx(dcoll, i, vec_i) + local_d_dx(dcoll, i, dd, vec_i) for i, vec_i in enumerate(vec)), (dcoll.ambient_dim,), (), - vecs, scalar_cls=DOFArray) + vec, scalar_cls=DOFArray) # }}} @@ -396,8 +435,9 @@ def get_ref_stiffness_transpose_mat(out_grp, in_grp): def _weak_scalar_grad(dcoll, dd_in, vec): from grudge.geometry import inverse_surface_metric_derivative_mat + dd_in = as_dofdesc(dd_in) in_discr = dcoll.discr_from_dd(dd_in) - out_discr = dcoll.discr_from_dd(DD_VOLUME_ALL) + out_discr = dcoll.discr_from_dd(dd_in.with_discr_tag(DISCR_TAG_BASE)) actx = vec.array_context inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd_in, @@ -493,8 +533,9 @@ def weak_local_d_dx(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer: from grudge.geometry import inverse_surface_metric_derivative_mat + dd_in = as_dofdesc(dd_in) in_discr = dcoll.discr_from_dd(dd_in) - out_discr = dcoll.discr_from_dd(DD_VOLUME_ALL) + out_discr = dcoll.discr_from_dd(dd_in.with_discr_tag(DISCR_TAG_BASE)) actx = vec.array_context inverse_jac_mat = inverse_surface_metric_derivative_mat(actx, dcoll, dd=dd_in, @@ -633,7 +674,7 @@ def mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer: *vec* being an :class:`~arraycontext.ArrayContainer`, the mass operator is applied component-wise. - May be called with ``(vec)`` or ``(dd, vec)``. + May be called with ``(vec)`` or ``(dd_in, vec)``. 
 
     Specifically, this function applies the mass matrix elementwise on a
     vector of coefficients :math:`\mathbf{f}` via:
@@ -645,7 +686,7 @@ def mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
     where :math:`\phi_i` are local polynomial basis functions on :math:`E`.
 
-    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
+    :arg dd_in: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
         Defaults to the base volume discretization if not provided.
     :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
         :class:`~arraycontext.ArrayContainer` of them.
 
@@ -655,13 +696,15 @@ def mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
 
     if len(args) == 1:
         vec, = args
-        dd = dof_desc.DD_VOLUME_ALL
+        dd_in = dof_desc.DD_VOLUME_ALL
     elif len(args) == 2:
-        dd, vec = args
+        dd_in, vec = args
     else:
         raise TypeError("invalid number of arguments")
 
-    return _apply_mass_operator(dcoll, DD_VOLUME_ALL, dd, vec)
+    dd_out = dd_in.with_discr_tag(DISCR_TAG_BASE)
+
+    return _apply_mass_operator(dcoll, dd_out, dd_in, vec)
 
 
 # }}}
 
 
@@ -719,7 +762,7 @@ def _apply_inverse_mass_operator(
     return DOFArray(actx, data=tuple(group_data))
 
 
-def inverse_mass(dcoll: DiscretizationCollection, vec) -> ArrayOrContainer:
+def inverse_mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer:
     r"""Return the action of the DG mass matrix inverse on a vector
     (or vectors) of :class:`~meshmode.dof_array.DOFArray`\ s, *vec*.
     In the case of *vec* being an :class:`~arraycontext.ArrayContainer`,
@@ -749,15 +792,24 @@ def inverse_mass(dcoll: DiscretizationCollection, vec) -> ArrayOrContainer:
     where :math:`\widehat{\mathbf{M}}` is the reference mass matrix on
     :math:`\widehat{E}`.
 
+    May be called with ``(vec)`` or ``(dd, vec)``.
+
     :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
         :class:`~arraycontext.ArrayContainer` of them.
+    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
+        Defaults to the base volume discretization if not provided.
     :returns: a :class:`~meshmode.dof_array.DOFArray` or an
         :class:`~arraycontext.ArrayContainer` like *vec*.
""" + if len(args) == 1: + vec, = args + dd = DD_VOLUME_ALL + elif len(args) == 2: + dd, vec = args + else: + raise TypeError("invalid number of arguments") - return _apply_inverse_mass_operator( - dcoll, DD_VOLUME_ALL, DD_VOLUME_ALL, vec - ) + return _apply_inverse_mass_operator(dcoll, dd, dd, vec) # }}} @@ -855,21 +907,25 @@ def get_ref_face_mass_mat(face_grp, vol_grp): return get_ref_face_mass_mat(face_element_group, vol_element_group) -def _apply_face_mass_operator(dcoll: DiscretizationCollection, dd, vec): +def _apply_face_mass_operator(dcoll: DiscretizationCollection, dd_in, vec): if not isinstance(vec, DOFArray): return map_array_container( - partial(_apply_face_mass_operator, dcoll, dd), vec + partial(_apply_face_mass_operator, dcoll, dd_in), vec ) from grudge.geometry import area_element - volm_discr = dcoll.discr_from_dd(DD_VOLUME_ALL) - face_discr = dcoll.discr_from_dd(dd) + dd_out = DOFDesc( + VolumeDomainTag(dd_in.domain_tag.volume_tag), + DISCR_TAG_BASE) + + volm_discr = dcoll.discr_from_dd(dd_out) + face_discr = dcoll.discr_from_dd(dd_in) dtype = vec.entry_dtype actx = vec.array_context assert len(face_discr.groups) == len(volm_discr.groups) - surf_area_elements = area_element(actx, dcoll, dd=dd, + surf_area_elements = area_element(actx, dcoll, dd=dd_in, _use_geoderiv_connection=actx.supports_nonscalar_broadcasting) return DOFArray( @@ -906,7 +962,7 @@ def face_mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer: *vec* being an arbitrary :class:`~arraycontext.ArrayContainer`, the face mass operator is applied component-wise. - May be called with ``(vec)`` or ``(dd, vec)``. + May be called with ``(vec)`` or ``(dd_in, vec)``. Specifically, this function applies the face mass matrix elementwise on a vector of coefficients :math:`\mathbf{f}` as the sum of contributions for @@ -937,13 +993,13 @@ def face_mass(dcoll: DiscretizationCollection, *args) -> ArrayOrContainer: if len(args) == 1: vec, = args - dd = DD_VOLUME_ALL.trace(FACE_RESTR_ALL) + dd_in = DD_VOLUME_ALL.trace(FACE_RESTR_ALL) elif len(args) == 2: - dd, vec = args + dd_in, vec = args else: raise TypeError("invalid number of arguments") - return _apply_face_mass_operator(dcoll, dd, vec) + return _apply_face_mass_operator(dcoll, dd_in, vec) # }}} From 5551ac0bdd6d6352886da2b2db298721cfe6fa0a Mon Sep 17 00:00:00 2001 From: Matthew Smith Date: Fri, 8 Apr 2022 10:46:01 -0500 Subject: [PATCH 27/30] fix doc in characteristic_lengthscales --- grudge/dt_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grudge/dt_utils.py b/grudge/dt_utils.py index a108cddbd..8f6b1712a 100644 --- a/grudge/dt_utils.py +++ b/grudge/dt_utils.py @@ -82,7 +82,7 @@ def characteristic_lengthscales( node distance on the reference cell (see :func:`dt_non_geometric_factors`), and :math:`r_D` is the inradius of the cell (see :func:`dt_geometric_factors`). - :returns: a frozen :class:`~meshmode.dof_array.DOFArray` containing a + :returns: a :class:`~meshmode.dof_array.DOFArray` containing a characteristic lengthscale for each element, at each nodal location. .. 

From edb13ad79fa21537590c0dcb16b4aecf4ed39985 Mon Sep 17 00:00:00 2001
From: Matthew Smith
Date: Fri, 8 Apr 2022 10:46:15 -0500
Subject: [PATCH 28/30] fix memoization in a couple of spots

---
 grudge/dt_utils.py   | 2 +-
 grudge/reductions.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/grudge/dt_utils.py b/grudge/dt_utils.py
index 8f6b1712a..817828234 100644
--- a/grudge/dt_utils.py
+++ b/grudge/dt_utils.py
@@ -94,7 +94,7 @@ def characteristic_lengthscales(
         methods has been used as a guide. Any concrete time integrator will
         likely require scaling of the values returned by this routine.
     """
-    @memoize_in(dcoll, (characteristic_lengthscales,
+    @memoize_in(dcoll, (characteristic_lengthscales, dd,
                         "compute_characteristic_lengthscales"))
     def _compute_characteristic_lengthscales():
         return actx.freeze(
diff --git a/grudge/reductions.py b/grudge/reductions.py
index ab106c8c4..6087b5725 100644
--- a/grudge/reductions.py
+++ b/grudge/reductions.py
@@ -344,7 +344,7 @@ def _apply_elementwise_reduction(
             )
         )
     else:
-        @memoize_in(actx, (_apply_elementwise_reduction,
+        @memoize_in(actx, (_apply_elementwise_reduction, dd,
                            "elementwise_%s_prg" % op_name))
         def elementwise_prg():
             # FIXME: This computes the reduction value redundantly for each

From e253b4b82dd91d065bac9c8b423a93deec34be7d Mon Sep 17 00:00:00 2001
From: Matthew Smith
Date: Sun, 3 Jul 2022 00:34:08 -0500
Subject: [PATCH 29/30] get contextual volume tags for BoundaryDomainTags as well

needed for projection from boundary to all faces
---
 grudge/projection.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/grudge/projection.py b/grudge/projection.py
index ba8d4bc3c..e21e02295 100644
--- a/grudge/projection.py
+++ b/grudge/projection.py
@@ -37,7 +37,11 @@
 from arraycontext import ArrayOrContainer
 
 from grudge.discretization import DiscretizationCollection
-from grudge.dof_desc import as_dofdesc, VolumeDomainTag, ConvertibleToDOFDesc
+from grudge.dof_desc import (
+    as_dofdesc,
+    VolumeDomainTag,
+    BoundaryDomainTag,
+    ConvertibleToDOFDesc)
 
 from numbers import Number
 
@@ -64,6 +68,8 @@ def project(
     contextual_volume_tag = None
     if isinstance(src_dofdesc.domain_tag, VolumeDomainTag):
         contextual_volume_tag = src_dofdesc.domain_tag.tag
+    elif isinstance(src_dofdesc.domain_tag, BoundaryDomainTag):
+        contextual_volume_tag = src_dofdesc.domain_tag.volume_tag
 
     tgt_dofdesc = as_dofdesc(tgt, _contextual_volume_tag=contextual_volume_tag)
 

From 9aa087b71b418ec28fb291ba806f1203bc1f5978 Mon Sep 17 00:00:00 2001
From: Matthew Smith
Date: Mon, 9 May 2022 14:36:46 -0500
Subject: [PATCH 30/30] tag some axes

---
 grudge/trace_pair.py | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/grudge/trace_pair.py b/grudge/trace_pair.py
index eee43b9f8..a20a17643 100644
--- a/grudge/trace_pair.py
+++ b/grudge/trace_pair.py
@@ -862,11 +862,20 @@ def cross_rank_trace_pairs(
 
         local_bdry_data = project(dcoll, volume_dd, bdry_dd, ary)
 
-        remote_bdry_data_template = _replace_dof_arrays(
-            local_bdry_data,
+        from arraycontext import tag_axes
+        from meshmode.transform_metadata import (
+            DiscretizationElementAxisTag,
+            DiscretizationDOFAxisTag)
+        remote_bdry_zeros = tag_axes(
+            actx, {
+                0: DiscretizationElementAxisTag(),
+                1: DiscretizationDOFAxisTag()},
             dcoll._inter_part_connections[
                 remote_part_id, local_part_id].from_discr.zeros(actx))
 
+        remote_bdry_data_template = _replace_dof_arrays(
+            local_bdry_data, remote_bdry_zeros)
+
         rank_bdry_communicators.append(
             rbc_class(actx, dcoll,
                 local_part_id=local_part_id,
@@ -980,16 +989,20 @@ def get_remote_connected_parts(local_vol_dd, remote_vol_dd):
         self_bdry_data = project(
             dcoll, self_vol_dd, self_bdry_dd, self_vol_data)
 
-        other_bdry_template_dd = other_vol_dd.trace(
-            BTAG_PARTITION(self_part_id))
-        other_bdry_container_template = project(
-            dcoll, other_vol_dd, other_bdry_template_dd,
-            other_vol_data)
-        other_bdry_data_template = _replace_dof_arrays(
-            other_bdry_container_template,
+        from arraycontext import tag_axes
+        from meshmode.transform_metadata import (
+            DiscretizationElementAxisTag,
+            DiscretizationDOFAxisTag)
+        other_bdry_zeros = tag_axes(
+            actx, {
+                0: DiscretizationElementAxisTag(),
+                1: DiscretizationDOFAxisTag()},
             dcoll._inter_part_connections[
                 other_part_id, self_part_id].from_discr.zeros(actx))
 
+        other_bdry_data_template = _replace_dof_arrays(
+            other_vol_data, other_bdry_zeros)
+
         rbcs.append(
             rbc_class(actx, dcoll,
                 local_part_id=self_part_id,