feat(PRT): add particle tracking model
Co-authored-by: Alden Provost <[email protected]>
1 parent c50ec80 · commit ed0f689
Showing 122 changed files with 18,537 additions and 58 deletions.
@@ -0,0 +1,255 @@
import os
from types import SimpleNamespace
from typing import Optional, Tuple, Union

import flopy
import numpy as np
import pandas as pd


def all_equal(col, val):
    """Check whether all values in the column equal the given value."""
    a = col.to_numpy()
    return a[0] == val and (a[0] == a).all()
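# Example (a minimal sketch with made-up values):
#
#   all_equal(pd.Series([0.5, 0.5, 0.5]), 0.5)  # True
#   all_equal(pd.Series([0.5, 0.6, 0.5]), 0.5)  # False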


def get_gwf_sim(
    name, ws, mf6
) -> Tuple[flopy.mf6.MFSimulation, SimpleNamespace]:
    """Create a FloPy simulation with a single GWF model and return it along with the test case context."""
    # test case context
    ctx = SimpleNamespace(
        nlay=1,
        nrow=10,
        ncol=10,
        top=1.0,
        botm=[0.0],
        nper=1,
        perlen=1.0,
        nstp=1,
        tsmult=1.0,
        porosity=0.1,
        # mp7 release points (cell-local coordinates)
        releasepts_mp7=[
            # node number, localx, localy, localz
            # (0-based indexing converted to 1-based for mp7 by flopy)
            (0, float(f"0.{i + 1}"), float(f"0.{i + 1}"), 0.5)
            for i in range(9)
        ],
        # expected release points in PRT format; use flopy
        # to convert mp7 to prt format then check equality
        releasepts_prt=[
            # particle index, k, i, j, x, y, z
            # (0-based indexing converted to 1-based for mf6 by flopy)
            [i, 0, 0, 0, float(f"0.{i + 1}"), float(f"9.{i + 1}"), 0.5]
            for i in range(9)
        ],
    )

    # create simulation
    sim = flopy.mf6.MFSimulation(
        sim_name=name,
        exe_name=mf6,
        version="mf6",
        sim_ws=ws,
    )

    # create tdis package
    flopy.mf6.modflow.mftdis.ModflowTdis(
        sim,
        pname="tdis",
        time_units="DAYS",
        nper=ctx.nper,
        perioddata=[(ctx.perlen, ctx.nstp, ctx.tsmult)],
    )

    # create gwf model
    gwfname = f"{name}_gwf"
    gwf = flopy.mf6.ModflowGwf(sim, modelname=gwfname, save_flows=True)

    # create gwf discretization
    flopy.mf6.modflow.mfgwfdis.ModflowGwfdis(
        gwf,
        pname="dis",
        nlay=ctx.nlay,
        nrow=ctx.nrow,
        ncol=ctx.ncol,
    )

    # create gwf initial conditions package
    flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic")

    # create gwf node property flow package
    flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf(
        gwf,
        pname="npf",
        save_saturation=True,
        save_specific_discharge=True,
    )

    # create gwf chd package
    spd = {
        0: [[(0, 0, 0), 1.0, 1.0], [(0, 9, 9), 0.0, 0.0]],
        1: [[(0, 0, 0), 0.0, 0.0], [(0, 9, 9), 1.0, 2.0]],
    }
    chd = flopy.mf6.ModflowGwfchd(
        gwf,
        pname="CHD-1",
        stress_period_data=spd,
        auxiliary=["concentration"],
    )

    # create gwf output control package
    # output file names
    gwf_budget_file = f"{gwfname}.bud"
    gwf_head_file = f"{gwfname}.hds"
    oc = flopy.mf6.ModflowGwfoc(
        gwf,
        budget_filerecord=gwf_budget_file,
        head_filerecord=gwf_head_file,
        saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
    )

    # create iterative model solution for gwf model
    ims = flopy.mf6.ModflowIms(sim)

    return sim, ctx
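# Example usage (a minimal sketch; the workspace path and executable name are
# assumptions):
#
#   sim, ctx = get_gwf_sim("prttest", ws="temp", mf6="mf6")
#   sim.write_simulation()
#   success, buff = sim.run_simulation()
#   assert success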


def check_track_data(
    track_bin: os.PathLike,
    track_hdr: os.PathLike,
    track_csv: os.PathLike,
):
    """Check that track data written to the binary and CSV output files are equal."""

    # get dtype from ascii header file
    dt = get_track_dtype(track_hdr)

    # read output files
    data_bin = np.fromfile(track_bin, dtype=dt)
    data_csv = np.genfromtxt(track_csv, dtype=dt, delimiter=",", names=True)
    if len(data_csv.shape) == 0:
        # https://stackoverflow.com/a/24943993/6514033
        data_csv = np.array([data_csv])

    assert (
        data_bin.shape == data_csv.shape
    ), f"Binary and CSV track data shapes do not match: {data_bin.shape} != {data_csv.shape}"

    # check particle tracks written to all output files are equal
    # check each column separately to avoid:
    # TypeError: The DType <class 'numpy._FloatAbstractDType'> could not be promoted by <class 'numpy.dtype[void]'>
    for k in data_bin.dtype.names:
        if k == "name":
            continue
        assert np.allclose(data_bin[k], data_csv[k], equal_nan=True)

    # make sure columns all have values in the expected range
    assert all(data_bin["iprp"] >= 1)
    assert all(data_bin["irpt"] >= 1)
    assert all(data_bin["kper"] >= 1)
    assert all(data_bin["kstp"] >= 1)
    assert all(data_bin["ilay"] >= 1)
    assert all(data_bin["icell"] >= 1)
    assert all(data_bin["istatus"] >= 0)
    assert all(data_bin["ireason"] >= 0)


def check_budget_data(lst: os.PathLike, perlen=1, nper=1, nstp=1):
    """Check the PRT model's mass budget as reported in its listing file."""
    # load PRT model's list file
    mflist = flopy.utils.mflistfile.ListBudget(
        lst, budgetkey="MASS BUDGET FOR ENTIRE MODEL"
    )
    names = mflist.get_record_names()
    entries = mflist.entries

    # check timesteps
    inc = mflist.get_incremental()
    v = inc["totim"][-1]
    exp = float(perlen * nper)
    assert v == exp, f"Last time should be {exp}, found {v}"

    # entries should be a subset of names
    assert all(e in names for e in entries)

    # todo what other record names should we expect?
    expected_entries = [
        "PRP_IN",
        "PRP_OUT",
    ]
    assert all(en in names for en in expected_entries)
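# Example usage (a minimal sketch; the listing file name is hypothetical):
#
#   check_budget_data(prt_ws / f"{prt_name}.lst", perlen=1.0, nper=1, nstp=1)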


def get_model_name(name, mdl):
    return f"{name}_{mdl}"


def get_track_dtype(path: os.PathLike):
    """Get the dtype of the track data recarray from the ascii header file."""

    with open(path) as f:
        hdr_lns = f.readlines()
    hdr_lns_spl = [[ll.strip() for ll in l.split(",")] for l in hdr_lns]
    return np.dtype(list(zip(hdr_lns_spl[0], hdr_lns_spl[1])))
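# The header file is expected to contain two comma-separated lines: field names
# on the first line and numpy dtype strings on the second, e.g. (hypothetical):
#
#   kper,kstp,ilay,icell,...
#   <i4,<i4,<i4,<i4,...
#
# zip() pairs them into the (name, format) tuples that np.dtype() accepts.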


def get_ireason_code(output_event):
    """Map a PRT output event name to the corresponding ireason code."""
    return (
        0
        if output_event == "RELEASE"
        else 1
        if output_event == "TRANSIT"
        else 2
        if output_event == "TIMESTEP"
        else 3
        if output_event == "TERMINATE"
        else 4
        if output_event == "WEAKSINK"
        else -1  # default
    )
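# Example: get_ireason_code("TIMESTEP") returns 2; any unrecognized event name
# falls through to -1.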


def get_partdata(grid, rpts):
    """Build a MODPATH 7 ParticleData object from a list of release point tuples."""
    if grid.grid_type == "structured":
        return flopy.modpath.ParticleData(
            partlocs=[grid.get_lrc(p[0])[0] for p in rpts],
            structured=True,
            localx=[p[1] for p in rpts],
            localy=[p[2] for p in rpts],
            localz=[p[3] for p in rpts],
            timeoffset=0,
            drape=0,
        )
    else:
        return flopy.modpath.ParticleData(
            partlocs=[p[0] for p in rpts],
            structured=False,
            localx=[p[1] for p in rpts],
            localy=[p[2] for p in rpts],
            localz=[p[3] for p in rpts],
            timeoffset=0,
            drape=0,
        )
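# Example usage (a minimal sketch, reusing the MP7-style points from the test
# case context returned by get_gwf_sim):
#
#   sim, ctx = get_gwf_sim("prttest", "temp", "mf6")
#   gwf = sim.get_model()  # single model in the simulation
#   partdata = get_partdata(gwf.modelgrid, ctx.releasepts_mp7)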


def get_perioddata(name, periods=1, fraction=None) -> Optional[dict]:
    """Build period data keyed by stress period, selecting the release option from the test name."""
    if "reft" in name:
        return None
    opt = [
        "FIRST"
        if "frst" in name
        else "ALL"
        if "all" in name
        else ("STEPS", 1)
        if "stps" in name
        else None
    ]
    if opt[0] is None:
        raise ValueError(f"Invalid period option: {name}")
    if fraction is not None:
        opt.append(("FRACTION", fraction))
    return {i: opt for i in range(periods)}
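# Example: a name containing "all" (and not "reft") yields {0: ["ALL"]} for a
# single period, optionally extended with ("FRACTION", fraction); a name
# containing "reft" yields None.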


def has_default_boundnames(data):
    """Check that particle names encode the same 1-based index as the 'irpt' column."""
    name = [int(n.partition("0")[2]) for n in data["name"].to_numpy()]
    irpt = data["irpt"].to_numpy()
    return np.array_equal(name, irpt)
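# Example (a sketch; the exact default naming convention is an assumption): for
# a default boundname like "PRP000000001", everything after the first "0" is
# parsed as an integer (here 1) and compared against the 1-based "irpt" column.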