From e8531e3db02524f7118ab8d505684afaba7549c6 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Thu, 23 May 2024 19:45:31 -0400 Subject: [PATCH] rebase with prt components, remove mock data and use real models for tests --- autotest/test_mp6.py | 6 +- autotest/test_plotutil.py | 145 ++++++++---- autotest/test_prtfile.py | 343 ++++++++++++++++++++++++----- flopy/plot/plotutil.py | 12 +- flopy/utils/modpathfile.py | 365 +++++++++++++++---------------- flopy/utils/particletrackfile.py | 35 ++- flopy/utils/prtfile.py | 57 ++++- 7 files changed, 656 insertions(+), 307 deletions(-) diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py index 11e8b90342..d04ac68c1a 100644 --- a/autotest/test_mp6.py +++ b/autotest/test_mp6.py @@ -310,7 +310,11 @@ def test_loadtxt(function_tmpdir, mp6_test_path): pthld = PathlineFile(pthfile) ra = loadtxt(pthfile, delimiter=" ", skiprows=3, dtype=pthld._dtype) ra2 = loadtxt( - pthfile, delimiter=" ", skiprows=3, dtype=pthld._dtype, use_pandas=False + pthfile, + delimiter=" ", + skiprows=3, + dtype=pthld._dtype, + use_pandas=False, ) assert np.array_equal(ra, ra2) diff --git a/autotest/test_plotutil.py b/autotest/test_plotutil.py index 9ed040c549..de4f05daa7 100644 --- a/autotest/test_plotutil.py +++ b/autotest/test_plotutil.py @@ -14,7 +14,6 @@ from flopy.utils.modpathfile import PathlineFile as MpPathlineFile from flopy.utils.prtfile import PathlineFile as PrtPathlineFile - nlay = 1 nrow = 10 ncol = 10 @@ -73,9 +72,7 @@ def gwf_sim(function_tmpdir): pname="tdis", time_units="DAYS", nper=nper, - perioddata=[ - (perlen, nstp, tsmult) - ], + perioddata=[(perlen, nstp, tsmult)], ) # create gwf model @@ -131,9 +128,10 @@ def gwf_sim(function_tmpdir): @pytest.fixture -def mp7_sim(gwf_sim, function_tmpdir): +def mp7_sim(gwf_sim): gwf = gwf_sim.get_model() - mp7_ws = function_tmpdir / "mp7" + ws = gwf_sim.sim_path.parent + mp7_ws = ws / "mp7" releasepts_mp7 = [ # node number, localx, localy, localz (0, float(f"0.{i + 1}"), float(f"0.{i + 1}"), 0.5) @@ -172,9 +170,10 @@ def mp7_sim(gwf_sim, function_tmpdir): @pytest.fixture -def prt_sim(function_tmpdir): - gwf_ws = function_tmpdir / "gwf" - prt_ws = function_tmpdir / "prt" +def prt_sim(gwf_sim): + ws = gwf_sim.sim_path.parent + gwf_ws = ws / "gwf" + prt_ws = ws / "prt" prt_name = "plotutil_prt" gwf_name = "plotutil_gwf" releasepts_prt = [ @@ -219,9 +218,7 @@ def prt_sim(function_tmpdir): ) # create mip package - flopy.mf6.ModflowPrtmip( - prt, pname="mip", porosity=porosity - ) + flopy.mf6.ModflowPrtmip(prt, pname="mip", porosity=porosity) # create prp package prp_track_file = f"{prt_name}.prp.trk" @@ -276,8 +273,16 @@ def prt_sim(function_tmpdir): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines(prt_sim, dataframe): +def test_to_mp7_pathlines(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") + if not dataframe: + prt_pls = prt_pls.to_records(index=False) mp7_pls = to_mp7_pathlines(prt_pls) assert ( @@ -291,13 +296,18 @@ def test_to_mp7_pathlines(prt_sim, dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines_empty(dataframe): +def test_to_mp7_pathlines_empty(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") prt_pls = ( - 
pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtype.names - ) + pd.DataFrame.from_records([], columns=prt_pls.dtypes) if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtype) + else np.recarray((0,), dtype=prt_pls.to_records(index=False).dtype) ) mp7_pls = to_mp7_pathlines(prt_pls) @@ -308,32 +318,50 @@ def test_to_mp7_pathlines_empty(dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_pathlines_noop(mp7_sim, dataframe): - plf = flopy.utils.PathlineFile(Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mppth") - og_pls = pd.DataFrame( - plf.get_destination_pathline_data(range(mp7_sim.modelgrid.nnodes), to_recarray=True) +def test_to_mp7_pathlines_noop(gwf_sim, mp7_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + mp7_sim.write_input() + mp7_sim.run_model() + + plf = flopy.utils.PathlineFile( + Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mppth" + ) + og_pls = plf.get_destination_pathline_data( + range(gwf_sim.get_model().modelgrid.nnodes), to_recarray=True ) + if dataframe: + og_pls = pd.DataFrame(og_pls) mp7_pls = to_mp7_pathlines(og_pls) assert ( type(mp7_pls) - == type(mp7_pls) + == type(og_pls) == (pd.DataFrame if dataframe else np.recarray) ) assert set( dict(mp7_pls.dtypes).keys() if dataframe else mp7_pls.dtype.names ) == set(MpPathlineFile.dtypes[7].names) assert np.array_equal( - mp7_pls if dataframe else pd.DataFrame(mp7_pls), og_pls + pd.DataFrame(mp7_pls) if dataframe else mp7_pls, og_pls ) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints(prt_sim, dataframe): +def test_to_mp7_endpoints(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") prt_eps = prt_pls[prt_pls.ireason == 3] - mp7_eps = to_mp7_endpoints(mp7_eps) - + if not dataframe: + prt_eps = prt_eps.to_records(index=False) + mp7_eps = to_mp7_endpoints(prt_eps) + assert np.isclose(mp7_eps.time[0], prt_eps.t.max()) assert set( dict(mp7_eps.dtypes).keys() if dataframe else mp7_eps.dtype.names @@ -341,13 +369,18 @@ def test_to_mp7_endpoints(prt_sim, dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints_empty(dataframe): +def test_to_mp7_endpoints_empty(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + + prt_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") mp7_eps = to_mp7_endpoints( - pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtype.names - ) + pd.DataFrame.from_records([], columns=prt_pls.dtypes) if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtype) + else np.recarray((0,), dtype=prt_pls.to_records(index=False).dtype) ) assert mp7_eps.empty if dataframe else mp7_eps.size == 0 @@ -357,26 +390,41 @@ def test_to_mp7_endpoints_empty(dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_mp7_endpoints_noop(mp7_sim, dataframe): +def test_to_mp7_endpoints_noop(gwf_sim, mp7_sim, dataframe): """Test a recarray or dataframe which already contains MP7 endpoint data""" - epf = flopy.utils.EndpointFile(Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mpend") - og_eps = pd.DataFrame( - epf.get_destination_endpoint_data(range(mp7_sim.modelgrid.nnodes), to_recarray=True) + + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + mp7_sim.write_input() + mp7_sim.run_model() + + epf = 
flopy.utils.EndpointFile( + Path(mp7_sim.model_ws) / f"{mp7_sim.name}.mpend" ) + og_eps = epf.get_destination_endpoint_data( + range(gwf_sim.get_model().modelgrid.nnodes) + ) + if dataframe: + og_eps = pd.DataFrame(og_eps) mp7_eps = to_mp7_endpoints(og_eps) assert np.array_equal( - mp7_eps if dataframe else pd.DataFrame(mp7_eps), og_eps + pd.DataFrame(mp7_eps) if dataframe else mp7_eps, og_eps ) @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_prt_pathlines_roundtrip(prt_sim, dataframe): +def test_to_prt_pathlines_roundtrip(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + og_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") mp7_pls = to_mp7_pathlines( - og_pls - if dataframe - else og_pls.to_records(index=False) + og_pls if dataframe else og_pls.to_records(index=False) ) prt_pls = to_prt_pathlines(mp7_pls) @@ -395,13 +443,18 @@ def test_to_prt_pathlines_roundtrip(prt_sim, dataframe): @pytest.mark.parametrize("dataframe", [True, False]) -def test_to_prt_pathlines_roundtrip_empty(dataframe): +def test_to_prt_pathlines_roundtrip_empty(gwf_sim, prt_sim, dataframe): + gwf_sim.write_simulation() + gwf_sim.run_simulation() + + prt_sim.write_simulation() + prt_sim.run_simulation() + + og_pls = pd.read_csv(prt_sim.sim_path / f"{prt_sim.name}.trk.csv") og_pls = to_mp7_pathlines( - pd.DataFrame.from_records( - [], columns=PrtPathlineFile.dtype.names - ) + pd.DataFrame.from_records([], columns=og_pls.dtypes) if dataframe - else np.recarray((0,), dtype=PrtPathlineFile.dtype) + else np.recarray((0,), dtype=og_pls.to_records(index=False).dtype) ) prt_pls = to_prt_pathlines(og_pls) diff --git a/autotest/test_prtfile.py b/autotest/test_prtfile.py index 61c5dfda2c..d15aab21dc 100644 --- a/autotest/test_prtfile.py +++ b/autotest/test_prtfile.py @@ -1,60 +1,297 @@ import pytest +import flopy from autotest.conftest import get_project_root_path from flopy.utils.prtfile import PathlineFile pytestmark = pytest.mark.mf6 proj_root = get_project_root_path() -prt_data_path = ( - proj_root / "examples" / "data" / "mf6" / "prt_data" / "001" -) - - -@pytest.mark.parametrize( - "path, header_path", - [ - ( - prt_data_path / "prt001.trk", - prt_data_path / "prt001.trk.hdr", - ), - (prt_data_path / "prt001.trk.csv", None), - ], -) -def test_init(path, header_path): - file = PathlineFile(path, header_filename=header_path) - assert file.fname == path - if path.suffix == ".csv": - assert len(file._data) == len(open(path).readlines()) - 1 - - -@pytest.mark.parametrize( - "path, header_path", - [ - ( - prt_data_path / "prt001.trk", - prt_data_path / "prt001.trk.hdr", - ), - (prt_data_path / "prt001.trk.csv", None), - ], -) -def test_intersect(path, header_path): - file = PathlineFile(path, header_filename=header_path) - nodes = [1, 11, 21] - intersection = file.intersect(nodes) - assert any(intersection) - assert all(d.icell in nodes for d in intersection.itertuples()) - - -@pytest.mark.parametrize( - "path, header_path", - [ - ( - prt_data_path / "prt001.trk", - prt_data_path / "prt001.trk.hdr", - ), - (prt_data_path / "prt001.trk.csv", None), - ], -) -def test_validate(path, header_path): - file = PathlineFile(path, header_filename=header_path) - file.validate() + +nlay = 1 +nrow = 10 +ncol = 10 +top = 1.0 +botm = [0.0] +nper = 1 +perlen = 1.0 +nstp = 1 +tsmult = 1.0 +porosity = 0.1 + + +def get_partdata(grid, rpts): + """ + Make a flopy.modpath.ParticleData from the given grid and release points. 
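+
+    Each entry of ``rpts`` is expected to be a (node number, localx, localy,
+    localz) tuple; for structured grids the node number is converted to a
+    layer/row/column tuple before being passed to ``ParticleData``.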
+ """ + + if grid.grid_type == "structured": + return flopy.modpath.ParticleData( + partlocs=[grid.get_lrc(p[0])[0] for p in rpts], + structured=True, + localx=[p[1] for p in rpts], + localy=[p[2] for p in rpts], + localz=[p[3] for p in rpts], + timeoffset=0, + drape=0, + ) + else: + return flopy.modpath.ParticleData( + partlocs=[p[0] for p in rpts], + structured=False, + localx=[p[1] for p in rpts], + localy=[p[2] for p in rpts], + localz=[p[3] for p in rpts], + timeoffset=0, + drape=0, + ) + + +@pytest.fixture +def gwf_sim(function_tmpdir): + gwf_ws = function_tmpdir / "gwf" + gwf_name = "plotutil_gwf" + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=gwf_name, + exe_name="mf6", + version="mf6", + sim_ws=gwf_ws, + ) + + # create tdis package + flopy.mf6.modflow.mftdis.ModflowTdis( + sim, + pname="tdis", + time_units="DAYS", + nper=nper, + perioddata=[(perlen, nstp, tsmult)], + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf(sim, modelname=gwf_name, save_flows=True) + + # create gwf discretization + flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( + gwf, + pname="dis", + nlay=nlay, + nrow=nrow, + ncol=ncol, + ) + + # create gwf initial conditions package + flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic") + + # create gwf node property flow package + flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf( + gwf, + pname="npf", + save_saturation=True, + save_specific_discharge=True, + ) + + # create gwf chd package + spd = { + 0: [[(0, 0, 0), 1.0, 1.0], [(0, 9, 9), 0.0, 0.0]], + 1: [[(0, 0, 0), 0.0, 0.0], [(0, 9, 9), 1.0, 2.0]], + } + chd = flopy.mf6.ModflowGwfchd( + gwf, + pname="CHD-1", + stress_period_data=spd, + auxiliary=["concentration"], + ) + + # create gwf output control package + # output file names + gwf_budget_file = f"{gwf_name}.bud" + gwf_head_file = f"{gwf_name}.hds" + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=gwf_budget_file, + head_filerecord=gwf_head_file, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + # create iterative model solution for gwf model + ims = flopy.mf6.ModflowIms(sim) + + return sim + + +@pytest.fixture +def mp7_sim(gwf_sim): + gwf = gwf_sim.get_model() + ws = gwf_sim.sim_path.parent + mp7_ws = ws / "mp7" + releasepts_mp7 = [ + # node number, localx, localy, localz + (0, float(f"0.{i + 1}"), float(f"0.{i + 1}"), 0.5) + for i in range(9) + ] + + partdata = get_partdata(gwf.modelgrid, releasepts_mp7) + mp7_name = "plotutil_mp7" + pg = flopy.modpath.ParticleGroup( + particlegroupname="G1", + particledata=partdata, + filename=f"{mp7_name}.sloc", + ) + mp = flopy.modpath.Modpath7( + modelname=mp7_name, + flowmodel=gwf, + exe_name="mp7", + model_ws=mp7_ws, + headfilename=f"{gwf.name}.hds", + budgetfilename=f"{gwf.name}.bud", + ) + mpbas = flopy.modpath.Modpath7Bas( + mp, + porosity=porosity, + ) + mpsim = flopy.modpath.Modpath7Sim( + mp, + simulationtype="pathline", + trackingdirection="forward", + budgetoutputoption="summary", + stoptimeoption="extend", + particlegroups=[pg], + ) + + return mp + + +@pytest.fixture +def prt_sim(gwf_sim): + ws = gwf_sim.sim_path.parent + gwf_ws = ws / "gwf" + prt_ws = ws / "prt" + prt_name = "plotutil_prt" + gwf_name = "plotutil_gwf" + releasepts_prt = [ + # particle index, k, i, j, x, y, z + [i, 0, 0, 0, float(f"0.{i + 1}"), float(f"9.{i + 1}"), 0.5] + for i in range(9) + ] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=prt_name, + exe_name="mf6", + version="mf6", + sim_ws=prt_ws, + ) + + # create tdis package + flopy.mf6.modflow.mftdis.ModflowTdis( + sim, + pname="tdis", + 
time_units="DAYS", + nper=nper, + perioddata=[ + ( + perlen, + nstp, + tsmult, + ) + ], + ) + + # create prt model + prt = flopy.mf6.ModflowPrt(sim, modelname=prt_name, save_flows=True) + + # create prt discretization + flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( + prt, + pname="dis", + nlay=nlay, + nrow=nrow, + ncol=ncol, + ) + + # create mip package + flopy.mf6.ModflowPrtmip(prt, pname="mip", porosity=porosity) + + # create prp package + prp_track_file = f"{prt_name}.prp.trk" + prp_track_csv_file = f"{prt_name}.prp.trk.csv" + flopy.mf6.ModflowPrtprp( + prt, + pname="prp1", + filename=f"{prt_name}_1.prp", + nreleasepts=len(releasepts_prt), + packagedata=releasepts_prt, + perioddata={0: ["FIRST"]}, + track_filerecord=[prp_track_file], + trackcsv_filerecord=[prp_track_csv_file], + stop_at_weak_sink="saws" in prt_name, + boundnames=True, + exit_solve_tolerance=1e-5, + ) + + # create output control package + prt_budget_file = f"{prt_name}.bud" + prt_track_file = f"{prt_name}.trk" + prt_track_csv_file = f"{prt_name}.trk.csv" + flopy.mf6.ModflowPrtoc( + prt, + pname="oc", + budget_filerecord=[prt_budget_file], + track_filerecord=[prt_track_file], + trackcsv_filerecord=[prt_track_csv_file], + saverecord=[("BUDGET", "ALL")], + ) + + # create the flow model interface + gwf_budget_file = gwf_ws / f"{gwf_name}.bud" + gwf_head_file = gwf_ws / f"{gwf_name}.hds" + flopy.mf6.ModflowPrtfmi( + prt, + packagedata=[ + ("GWFHEAD", gwf_head_file), + ("GWFBUDGET", gwf_budget_file), + ], + ) + + # add explicit model solution + ems = flopy.mf6.ModflowEms( + sim, + pname="ems", + filename=f"{prt_name}.ems", + ) + sim.register_solution_package(ems, [prt.name]) + + return sim + + +# todo: finish tests + +# @pytest.mark.parametrize("csv", [False, True]) +# def test_init(gwf_sim, prt_sim, csv): +# +# +# file = PathlineFile(path, header_filename=header_path) +# assert file.fname == path +# if path.suffix == ".csv": +# assert len(file._data) == len(open(path).readlines()) - 1 +# +# +# @pytest.mark.parametrize("csv", [False, True]) +# def test_intersect(gwf_sim, prt_sim, csv): +# +# +# file = PathlineFile(path, header_filename=header_path) +# nodes = [1, 11, 21] +# intersection = file.intersect(nodes) +# assert any(intersection) +# assert all(d.icell in nodes for d in intersection.itertuples()) +# +# +# @pytest.mark.parametrize("csv", [False, True]) +# def test_validate(gwf_sim, prt_sim, csv): +# +# +# file = PathlineFile(path, header_filename=header_path) +# file.validate() +# diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index fba2dc062d..c4e6533e1e 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -2733,7 +2733,7 @@ def to_mp7_pathlines( ], dtype=MpPathlineFile.dtypes[7], ) - elif all(n in data.dtypes for n in ParticleTrackFile._dtype.names): + elif all(n in data.dtypes for n in ParticleTrackFile.dtypes["base"].names): data = np.core.records.fromarrays( [ data["particleid"], @@ -2762,7 +2762,7 @@ def to_mp7_pathlines( f"{pformat(MpPathlineFile.dtypes[6].names)} for MODPATH 6, or;\n" f"{pformat(MpPathlineFile.dtypes[7].names)} for MODPATH 7, or;\n" f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" - f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)" + f"{pformat(ParticleTrackFile.dtypes['base'].names)} (minimal expected dtype names)" ) return pd.DataFrame(data) if rt == pd.DataFrame else data @@ -2893,7 +2893,7 @@ def to_mp7_endpoints( ], dtype=MpEndpointFile.dtypes[7], ) - elif all(n in data.dtypes for n in 
ParticleTrackFile._dtype.names): + elif all(n in data.dtypes for n in ParticleTrackFile.dtypes["base"].names): data = np.core.records.fromarrays( [ endpts["particleid"], @@ -2933,7 +2933,7 @@ def to_mp7_endpoints( f"{pformat(MpEndpointFile.dtypes[6].names)} for MODPATH 6, or;\n" f"{pformat(MpEndpointFile.dtypes[7].names)} for MODPATH 7, or;\n" f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" - f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)" + f"{pformat(ParticleTrackFile.dtypes['base'].names)} (minimal expected dtype names)" ) return pd.DataFrame(data) if rt == pd.DataFrame else data @@ -3020,7 +3020,7 @@ def to_prt_pathlines( n in data.dtypes for n in PrtPathlineFile.dtypes["base"].names ): # already in prt format? return data if rt == pd.DataFrame else data.to_records(index=False) - elif all(n in data.dtypes for n in ParticleTrackFile._dtype.names): + elif all(n in data.dtypes for n in ParticleTrackFile.dtype["base"].names): data = np.core.records.fromarrays( [ np.zeros(data.shape[0]), @@ -3053,7 +3053,7 @@ def to_prt_pathlines( f"{pformat(MpEndpointFile.dtypes[5].names)} for MODPATH 5 endpoints, or;\n" f"{pformat(MpEndpointFile.dtypes[3].names)} for MODPATH 3 endpoints, or;\n" f"{pformat(PrtPathlineFile.dtypes['base'].names)} for MODFLOW 6 PRT, or;\n" - f"{pformat(ParticleTrackFile._dtype.names)} (minimal expected dtype names)" + f"{pformat(ParticleTrackFile.dtypes['base'].names)} (minimal expected dtype names)" ) if rt == pd.DataFrame: diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py index a226d89f3d..3f3585fbcd 100644 --- a/flopy/utils/modpathfile.py +++ b/flopy/utils/modpathfile.py @@ -18,11 +18,6 @@ class ModpathFile(ParticleTrackFile, ABC): """Provides MODPATH output file support.""" - @property - @abstractmethod - def dtypes(self): - pass - def __init__( self, filename: Union[str, os.PathLike], verbose: bool = False ): @@ -167,67 +162,65 @@ class PathlineFile(ModpathFile): """ - @property - def dtypes(self): - return { - **dict.fromkeys( - [3, 5], - np.dtype( - [ - ("particleid", np.int32), - ("x", np.float32), - ("y", np.float32), - ("zloc", np.float32), - ("z", np.float32), - ("time", np.float32), - ("j", np.int32), - ("i", np.int32), - ("k", np.int32), - ("cumulativetimestep", np.int32), - ] - ), - ), - 6: np.dtype( + dtypes = { + **dict.fromkeys( + [3, 5], + np.dtype( [ ("particleid", np.int32), - ("particlegroup", np.int32), - ("timepointindex", np.int32), - ("cumulativetimestep", np.int32), - ("time", np.float32), ("x", np.float32), ("y", np.float32), - ("z", np.float32), - ("k", np.int32), - ("i", np.int32), - ("j", np.int32), - ("grid", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), ("zloc", np.float32), - ("linesegmentindex", np.int32), - ] - ), - 7: np.dtype( - [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("sequencenumber", np.int32), - ("particleidloc", np.int32), - ("time", np.float32), - ("x", np.float32), - ("y", np.float32), ("z", np.float32), + ("time", np.float32), + ("j", np.int32), + ("i", np.int32), ("k", np.int32), - ("node", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("stressperiod", np.int32), - ("timestep", np.int32), + ("cumulativetimestep", np.int32), ] ), - } - + ), + 6: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("timepointindex", np.int32), + ("cumulativetimestep", np.int32), + ("time", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("k", 
np.int32), + ("i", np.int32), + ("j", np.int32), + ("grid", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("linesegmentindex", np.int32), + ] + ), + 7: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("sequencenumber", np.int32), + ("particleidloc", np.int32), + ("time", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("k", np.int32), + ("node", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("stressperiod", np.int32), + ("timestep", np.int32), + ] + ), + } + @property def dtype(self): return self._dtype @@ -459,100 +452,98 @@ class EndpointFile(ModpathFile): """ - @property - def dtypes(self): - return { - **dict.fromkeys( - [3, 5], - np.dtype( - [ - ("zone", np.int32), - ("j", np.int32), - ("i", np.int32), - ("k", np.int32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("zloc", np.float32), - ("time", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("zloc0", np.float32), - ("j0", np.int32), - ("i0", np.int32), - ("k0", np.int32), - ("zone0", np.int32), - ("cumulativetimestep", np.int32), - ("ipcode", np.int32), - ("time0", np.float32), - ] - ), - ), - 6: np.dtype( + dtypes = { + **dict.fromkeys( + [3, 5], + np.dtype( [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("status", np.int32), - ("time0", np.float32), - ("time", np.float32), - ("initialgrid", np.int32), - ("k0", np.int32), - ("i0", np.int32), - ("j0", np.int32), - ("cellface0", np.int32), - ("zone0", np.int32), - ("xloc0", np.float32), - ("yloc0", np.float32), - ("zloc0", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("z0", np.float32), - ("finalgrid", np.int32), - ("k", np.int32), - ("i", np.int32), - ("j", np.int32), - ("cellface", np.int32), ("zone", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), + ("j", np.int32), + ("i", np.int32), + ("k", np.int32), ("x", np.float32), ("y", np.float32), ("z", np.float32), - ("label", "|S40"), - ] - ), - 7: np.dtype( - [ - ("particleid", np.int32), - ("particlegroup", np.int32), - ("particleidloc", np.int32), - ("status", np.int32), - ("time0", np.float32), + ("zloc", np.float32), ("time", np.float32), - ("node0", np.int32), - ("k0", np.int32), - ("xloc0", np.float32), - ("yloc0", np.float32), - ("zloc0", np.float32), ("x0", np.float32), ("y0", np.float32), - ("z0", np.float32), + ("zloc0", np.float32), + ("j0", np.int32), + ("i0", np.int32), + ("k0", np.int32), ("zone0", np.int32), - ("initialcellface", np.int32), - ("node", np.int32), - ("k", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("zone", np.int32), - ("cellface", np.int32), + ("cumulativetimestep", np.int32), + ("ipcode", np.int32), + ("time0", np.float32), ] ), - } + ), + 6: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("status", np.int32), + ("time0", np.float32), + ("time", np.float32), + ("initialgrid", np.int32), + ("k0", np.int32), + ("i0", np.int32), + ("j0", np.int32), + ("cellface0", np.int32), + ("zone0", np.int32), + ("xloc0", np.float32), + ("yloc0", np.float32), + ("zloc0", np.float32), + ("x0", np.float32), + ("y0", np.float32), + ("z0", np.float32), + ("finalgrid", np.int32), + ("k", np.int32), + ("i", np.int32), + ("j", np.int32), + ("cellface", np.int32), + ("zone", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("x", 
np.float32), + ("y", np.float32), + ("z", np.float32), + ("label", "|S40"), + ] + ), + 7: np.dtype( + [ + ("particleid", np.int32), + ("particlegroup", np.int32), + ("particleidloc", np.int32), + ("status", np.int32), + ("time0", np.float32), + ("time", np.float32), + ("node0", np.int32), + ("k0", np.int32), + ("xloc0", np.float32), + ("yloc0", np.float32), + ("zloc0", np.float32), + ("x0", np.float32), + ("y0", np.float32), + ("z0", np.float32), + ("zone0", np.int32), + ("initialcellface", np.int32), + ("node", np.int32), + ("k", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("zone", np.int32), + ("cellface", np.int32), + ] + ), + } @property def dtype(self): @@ -817,63 +808,61 @@ class TimeseriesFile(ModpathFile): >>> ts1 = tsobj.get_data(partid=1) """ - @property - def dtypes(self): - return { - **dict.fromkeys( - [3, 5], - np.dtype( - [ - ("timestepindex", np.int32), - ("particleid", np.int32), - ("node", np.int32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("zloc", np.float32), - ("time", np.float32), - ("timestep", np.int32), - ] - ), - ), - 6: np.dtype( + dtypes = { + **dict.fromkeys( + [3, 5], + np.dtype( [ - ("timepointindex", np.int32), - ("timestep", np.int32), - ("time", np.float32), + ("timestepindex", np.int32), ("particleid", np.int32), - ("particlegroup", np.int32), + ("node", np.int32), ("x", np.float32), ("y", np.float32), ("z", np.float32), - ("grid", np.int32), - ("k", np.int32), - ("i", np.int32), - ("j", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), ("zloc", np.float32), - ] - ), - 7: np.dtype( - [ - ("timepointindex", np.int32), - ("timestep", np.int32), ("time", np.float32), - ("particleid", np.int32), - ("particlegroup", np.int32), - ("particleidloc", np.int32), - ("node", np.int32), - ("xloc", np.float32), - ("yloc", np.float32), - ("zloc", np.float32), - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("k", np.int32), + ("timestep", np.int32), ] ), - } + ), + 6: np.dtype( + [ + ("timepointindex", np.int32), + ("timestep", np.int32), + ("time", np.float32), + ("particleid", np.int32), + ("particlegroup", np.int32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("grid", np.int32), + ("k", np.int32), + ("i", np.int32), + ("j", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ] + ), + 7: np.dtype( + [ + ("timepointindex", np.int32), + ("timestep", np.int32), + ("time", np.float32), + ("particleid", np.int32), + ("particlegroup", np.int32), + ("particleidloc", np.int32), + ("node", np.int32), + ("xloc", np.float32), + ("yloc", np.float32), + ("zloc", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("k", np.int32), + ] + ), + } @property def dtype(self): diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py index 186267fb7f..e3fcd5653d 100644 --- a/flopy/utils/particletrackfile.py +++ b/flopy/utils/particletrackfile.py @@ -9,8 +9,8 @@ from typing import Union import numpy as np -from numpy.lib.recfunctions import stack_arrays import pandas as pd +from numpy.lib.recfunctions import stack_arrays MIN_PARTICLE_TRACK_DTYPE = np.dtype( [ @@ -44,13 +44,8 @@ class ParticleTrackFile(ABC): Minimal data shared by all particle track file formats. """ - @property - @abstractmethod - def dtype(self): - """ - Particle track file data dtype. 
- """ - return MIN_PARTICLE_TRACK_DTYPE + dtypes = {"base": ..., "full": ...} + """Base and full (extended) canonical pathline data dtypes.""" def __init__( self, @@ -126,6 +121,12 @@ def get_data( return data[idx] + def get_dataframe(self) -> pd.DataFrame: + return self._data + + def get_recarray(self) -> np.recarray: + return self._data.to_records(index=False) + def get_alldata(self, totim=None, ge=True, minimal=False): """ Get all particle tracks separately, optionally filtering by time. @@ -343,8 +344,20 @@ def validate(self): for dt in dtype.values(): self.validate(dt) elif isinstance(dtype, pd.Series): - subset = OrderedDict({k: v for k, v in dtype.to_dict().items() if k in MIN_PARTICLE_TRACK_DTYPE.names}) + subset = OrderedDict( + { + k: v + for k, v in dtype.to_dict().items() + if k in MIN_PARTICLE_TRACK_DTYPE.names + } + ) assert subset == expected elif isinstance(dtype, np.dtypes.VoidDType): - subset = OrderedDict({k: v for k, v in dtype.fields.items() if k in MIN_PARTICLE_TRACK_DTYPE.names}) - assert subset == expected \ No newline at end of file + subset = OrderedDict( + { + k: v + for k, v in dtype.fields.items() + if k in MIN_PARTICLE_TRACK_DTYPE.names + } + ) + assert subset == expected diff --git a/flopy/utils/prtfile.py b/flopy/utils/prtfile.py index 219ef6d3ba..5841f0e02c 100644 --- a/flopy/utils/prtfile.py +++ b/flopy/utils/prtfile.py @@ -18,13 +18,66 @@ class PathlineFile(ParticleTrackFile): key_cols = ["imdl", "iprp", "irpt", "trelease"] """Columns making up each particle's composite key.""" + dtypes = { + "base": np.dtype( + [ + ("kper", np.int32), + ("kstp", np.int32), + ("imdl", np.int32), + ("iprp", np.int32), + ("irpt", np.int32), + ("ilay", np.int32), + ("icell", np.int32), + ("izone", np.int32), + ("istatus", np.int32), + ("ireason", np.int32), + ("trelease", np.float32), + ("t", np.float32), + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("name", np.str_), + ] + ), + "full": np.dtype( + [ + ("kper", np.int32), + ("kstp", np.int32), + ("imdl", np.int32), + ("iprp", np.int32), + ("irpt", np.int32), + ("ilay", np.int32), + ("k", np.int32), # canonical base dtype + ("icell", np.int32), + ("izone", np.int32), + ("idest", np.int32), # canonical full dtype + ("dest", np.str_), # canonical full dtype + ("istatus", np.int32), + ("ireason", np.int32), + ("trelease", np.float32), + ("t", np.float32), + ("t0", np.float32), # canonical full dtype + ("tt", np.float32), # canonical full dtype + ("time", np.float32), # canonical full dtype + ("x", np.float32), + ("y", np.float32), + ("z", np.float32), + ("particleid", np.int32), # canonical base dtype + ("name", np.str_), + ] + ), + } + """Base and full (extended) PRT pathline data dtypes.""" + @property def dtype(self): """ PRT track file dtype. This is loaded dynamically from the binary header file or CSV file headers. A best-effort attempt is made to add extra columns to comply with the canonical - `flopy.utils.particletrackfile.dtype`, as well as convenience columns, including release - and termination time and destination zone number and name. + `flopy.utils.particletrackfile.dtypes["base"]`, as well as convenience columns including + release and termination time, and destination zone number and name, for the full dtype. + If the loaded dtype is discovered to be different from `PrtPathlineFile.dtypes["base"]`, + a warning will be issued. Consumers of this class which expect the canonical particle track file attributes should call `validate()` to make sure the attributes were successfully loaded.