diff --git a/.docs/Notebooks/modpath7_structured_transient_example.py b/.docs/Notebooks/modpath7_structured_transient_example.py index 717b77a3e0..fb76ebff6b 100644 --- a/.docs/Notebooks/modpath7_structured_transient_example.py +++ b/.docs/Notebooks/modpath7_structured_transient_example.py @@ -6,15 +6,25 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.14.5 +# jupytext_version: 1.14.4 # kernelspec: -# display_name: Python 3 +# display_name: Python 3 (ipykernel) # language: python # name: python3 +# language_info: +# codemirror_mode: +# name: ipython +# version: 3 +# file_extension: .py +# mimetype: text/x-python +# name: python +# nbconvert_exporter: python +# pygments_lexer: ipython3 +# version: 3.9.12 # metadata: -# section: modpath # authors: -# - name: Wes Bonelli +# - name: Wes Bonelli +# section: modpath # --- # # Using MODPATH 7 with structured grids (transient example) @@ -228,7 +238,7 @@ def no_flow(w): [drain[0], drain[1], i + drain[2][0], 322.5, 100000.0, 6] for i in range(drain[2][1] - drain[2][0]) ] -drn = flopy.mf6.modflow.mfgwfdrn.ModflowGwfdrn(gwf, stress_period_data={0: dd}) +drn = flopy.mf6.modflow.mfgwfdrn.ModflowGwfdrn(gwf, auxiliary=["IFACE"], stress_period_data={0: dd}) # output control headfile = f"{sim_name}.hds" @@ -400,3 +410,5 @@ def add_legend(ax): temp_dir.cleanup() except: pass + + diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py index 408165187e..7fd6e9161d 100644 --- a/autotest/regression/test_mf6.py +++ b/autotest/regression/test_mf6.py @@ -1091,7 +1091,7 @@ def test_np002(function_tmpdir, example_data_path): md2 = sim2.get_model() ghb2 = md2.get_package("ghb") spd2 = ghb2.stress_period_data.get_data(1) - assert spd2 == [] + assert len(spd2) == 0 # test paths sim_path_test = Path(ws) / "sim_path" diff --git a/autotest/regression/test_mf6_pandas.py b/autotest/regression/test_mf6_pandas.py new file mode 100644 index 0000000000..c8123396b5 --- /dev/null +++ b/autotest/regression/test_mf6_pandas.py @@ -0,0 +1,353 @@ +import copy +import os +import shutil +import sys +from pathlib import Path + +import numpy as np +import pandas +import pandas as pd +import pytest +from modflow_devtools.markers import requires_exe, requires_pkg + +import flopy +from flopy.mf6 import ( + ExtFileAction, + MFModel, + MFSimulation, + ModflowGwf, + ModflowGwfchd, + ModflowGwfdis, + ModflowGwfdisv, + ModflowGwfdrn, + ModflowGwfevt, + ModflowGwfevta, + ModflowGwfghb, + ModflowGwfgnc, + ModflowGwfgwf, + ModflowGwfgwt, + ModflowGwfhfb, + ModflowGwfic, + ModflowGwfnpf, + ModflowGwfoc, + ModflowGwfrch, + ModflowGwfrcha, + ModflowGwfriv, + ModflowGwfsfr, + ModflowGwfsto, + ModflowGwfwel, + ModflowGwtadv, + ModflowGwtdis, + ModflowGwtic, + ModflowGwtmst, + ModflowGwtoc, + ModflowGwtssm, + ModflowIms, + ModflowTdis, + ModflowUtltas, +) +from flopy.mf6.data.mfdataplist import MFPandasList + + +pytestmark = pytest.mark.mf6 + + +@requires_exe("mf6") +@pytest.mark.regression +def test_pandas_001(function_tmpdir, example_data_path): + # init paths + test_ex_name = "pd001" + model_name = "pd001_mod" + data_path = example_data_path / "mf6" / "create_tests" / test_ex_name + ws = function_tmpdir / "ws" + + expected_output_folder = data_path / "expected_output" + expected_head_file = expected_output_folder / "pd001_mod.hds" + expected_cbc_file = expected_output_folder / "pd001_mod.cbc" + + # model tests + sim = MFSimulation( + sim_name=test_ex_name, + version="mf6", + exe_name="mf6", + sim_ws=ws, + continue_=True, + 
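+        # use_pandas=True (below) routes stress period list data through the
+        # new pandas-backed MFPandasList storage added in this PR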
+        memory_print_option="summary",
+        use_pandas=True,
+    )
+    name = sim.name_file
+    assert name.continue_.get_data()
+    assert name.nocheck.get_data() is None
+    assert name.memory_print_option.get_data() == "summary"
+    assert sim.simulation_data.use_pandas
+
+    tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
+    tdis_package = ModflowTdis(
+        sim, time_units="DAYS", nper=2, perioddata=tdis_rc
+    )
+    # replace with real ims file
+    ims_package = ModflowIms(
+        sim,
+        pname="my_ims_file",
+        filename=f"{test_ex_name}.ims",
+        print_option="ALL",
+        complexity="SIMPLE",
+        outer_dvclose=0.00001,
+        outer_maximum=50,
+        under_relaxation="NONE",
+        inner_maximum=30,
+        inner_dvclose=0.00001,
+        linear_acceleration="CG",
+        preconditioner_levels=7,
+        preconditioner_drop_tolerance=0.01,
+        number_orthogonalizations=2,
+    )
+    model = ModflowGwf(
+        sim, modelname=model_name, model_nam_file=f"{model_name}.nam"
+    )
+    top = {"filename": "top.txt", "data": 100.0}
+    botm = {"filename": "botm.txt", "data": 50.0}
+    dis_package = ModflowGwfdis(
+        model,
+        length_units="FEET",
+        nlay=1,
+        nrow=1,
+        ncol=10,
+        delr=500.0,
+        delc=500.0,
+        top=top,
+        botm=botm,
+        filename=f"{model_name}.dis",
+        pname="mydispkg",
+    )
+    ic_package = ModflowGwfic(model, strt=80.0, filename=f"{model_name}.ic")
+    npf_package = ModflowGwfnpf(
+        model,
+        save_flows=True,
+        alternative_cell_averaging="logarithmic",
+        icelltype=1,
+        k=5.0,
+    )
+    oc_package = ModflowGwfoc(
+        model,
+        budget_filerecord=[("np001_mod 1.cbc",)],
+        head_filerecord=[("np001_mod 1.hds",)],
+        saverecord={
+            0: [("HEAD", "ALL"), ("BUDGET", "ALL")],
+            1: [],
+        },
+        printrecord=[("HEAD", "ALL")],
+    )
+    empty_sp_text = oc_package.saverecord.get_file_entry(1)
+    assert empty_sp_text == ""
+    oc_package.printrecord.add_transient_key(1)
+    oc_package.printrecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1)
+    oc_package.saverecord.set_data([("HEAD", "ALL"), ("BUDGET", "ALL")], 1)
+    sto_package = ModflowGwfsto(
+        model, save_flows=True, iconvert=1, ss=0.000001, sy=0.15
+    )
+
+    # test saving a text file with recarray data
+    data_line = [((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)]
+    type_list = [("cellid", object), ("q", float)]
+    data_rec = np.rec.array(data_line, type_list)
+    well_spd = {
+        0: {
+            "filename": "wel_0.txt",
+            "data": data_rec,
+        },
+        1: None,
+    }
+    wel_package = ModflowGwfwel(
+        model,
+        filename=f"{model_name}.wel",
+        print_input=True,
+        print_flows=True,
+        save_flows=True,
+        maxbound=2,
+        stress_period_data=well_spd,
+    )
+
+    wel_package.stress_period_data.add_transient_key(1)
+    # test user-generated pandas dataframe without headers
+    data_pd = pd.DataFrame([(0, 0, 4, -1000.0), (0, 0, 7, -20.0)])
+    wel_package.stress_period_data.set_data(
+        {1: {"filename": "wel_1.txt", "iprn": 1, "data": data_pd}}
+    )
+
+    # test getting data
+    assert isinstance(wel_package.stress_period_data, MFPandasList)
+    well_data_pd = wel_package.stress_period_data.get_dataframe(0)
+    assert isinstance(well_data_pd, pd.DataFrame)
+    assert well_data_pd.iloc[0, 0] == 0
+    assert well_data_pd.iloc[0, 1] == 0
+    assert well_data_pd.iloc[0, 2] == 4
+    assert well_data_pd.iloc[0, 3] == -2000.0
+    assert well_data_pd["layer"][0] == 0
+    assert well_data_pd["row"][0] == 0
+    assert well_data_pd["column"][0] == 4
+    assert well_data_pd["q"][0] == -2000.0
+    assert well_data_pd["layer"][1] == 0
+    assert well_data_pd["row"][1] == 0
+    assert well_data_pd["column"][1] == 7
+    assert well_data_pd["q"][1] == -2.0
+
+    well_data_rec = wel_package.stress_period_data.get_data(0)
+    assert isinstance(well_data_rec, np.recarray)
+    assert well_data_rec[0][0] == (0, 0, 4)
+    assert well_data_rec[0][1] == -2000.0
+
+    # test time series data
+    drn_package = ModflowGwfdrn(
+        model,
+        print_input=True,
+        print_flows=True,
+        save_flows=True,
+        maxbound=1,
+        timeseries=[(0.0, 60.0), (100000.0, 60.0)],
+        stress_period_data=[((0, 0, 0), 80, "drn_1")],
+    )
+    drn_package.ts.time_series_namerecord = "drn_1"
+    drn_package.ts.interpolation_methodrecord = "linearend"
+
+    # test data with aux vars
+    riv_spd = {
+        0: {
+            "filename": "riv_0.txt",
+            "data": [((0, 0, 9), 110, 90.0, 100.0, 1.0, 2.0, 3.0)],
+        }
+    }
+    riv_package = ModflowGwfriv(
+        model,
+        print_input=True,
+        print_flows=True,
+        save_flows=True,
+        maxbound=1,
+        auxiliary=["var1", "var2", "var3"],
+        stress_period_data=riv_spd,
+    )
+    riv_data = riv_package.stress_period_data.get_data(0)
+    assert riv_data[0][0] == (0, 0, 9)
+    assert riv_data[0][1] == 110
+    assert riv_data[0][2] == 90.0
+    assert riv_data[0][3] == 100.0
+    assert riv_data[0][4] == 1.0
+    assert riv_data[0][5] == 2.0
+    assert riv_data[0][6] == 3.0
+
+    # write simulation to new location
+    sim.write_simulation()
+
+    # run simulation
+    success, buff = sim.run_simulation()
+    assert success, f"simulation {sim.name} did not run"
+
+    # modify external files to resemble user-generated text
+    wel_file_0_pth = os.path.join(sim.sim_path, "wel_0.txt")
+    wel_file_1_pth = os.path.join(sim.sim_path, "wel_1.txt")
+    riv_file_0_pth = os.path.join(sim.sim_path, "riv_0.txt")
+    with open(wel_file_0_pth, "w") as fd_wel_0:
+        fd_wel_0.write("# comment header\n\n")
+        fd_wel_0.write("1 1 5 -2000.0 # comment\n")
+        fd_wel_0.write("# more comments\n")
+        fd_wel_0.write("1 1 8 -2.0\n")
+        fd_wel_0.write("# more comments\n")
+
+    with open(wel_file_1_pth, "w") as fd_wel_1:
+        fd_wel_1.write("# comment header\n\n")
+        fd_wel_1.write("\t1\t1\t5\t-1000.0\t# comment\n")
+        fd_wel_1.write("# more comments\n")
+        fd_wel_1.write("1 1\t8\t-20.0\n")
+        fd_wel_1.write("# more comments\n")
+
+    with open(riv_file_0_pth, "w") as fd_riv_0:
+        fd_riv_0.write("# comment header\n\n")
+        fd_riv_0.write(
+            "1\t1\t10\t110\t9.00000000E+01\t1.00000000E+02"
+            "\t1.00000000E+00\t2.00000000E+00\t3.00000000E+00"
+            "\t# comment\n"
+        )
+
+    # test loading and checking data
+    test_sim = MFSimulation.load(
+        test_ex_name,
+        "mf6",
+        "mf6",
+        sim.sim_path,
+        write_headers=False,
+    )
+    test_mod = test_sim.get_model()
+    test_wel = test_mod.get_package("wel")
+
+    well_data_pd_0 = test_wel.stress_period_data.get_dataframe(0)
+    assert isinstance(well_data_pd_0, pd.DataFrame)
+    assert well_data_pd_0.iloc[0, 0] == 0
+    assert well_data_pd_0.iloc[0, 1] == 0
+    assert well_data_pd_0.iloc[0, 2] == 4
+    assert well_data_pd_0.iloc[0, 3] == -2000.0
+    assert well_data_pd_0["layer"][0] == 0
+    assert well_data_pd_0["row"][0] == 0
+    assert well_data_pd_0["column"][0] == 4
+    assert well_data_pd_0["q"][0] == -2000.0
+    assert well_data_pd_0["layer"][1] == 0
+    assert well_data_pd_0["row"][1] == 0
+    assert well_data_pd_0["column"][1] == 7
+    assert well_data_pd_0["q"][1] == -2.0
+    well_data_pd = test_wel.stress_period_data.get_dataframe(1)
+    assert isinstance(well_data_pd, pd.DataFrame)
+    assert well_data_pd.iloc[0, 0] == 0
+    assert well_data_pd.iloc[0, 1] == 0
+    assert well_data_pd.iloc[0, 2] == 4
+    assert well_data_pd.iloc[0, 3] == -1000.0
+    assert well_data_pd["layer"][0] == 0
+    assert well_data_pd["row"][0] == 0
+    assert well_data_pd["column"][0] == 4
+    assert well_data_pd["q"][0] == -1000.0
+    assert well_data_pd["layer"][1] == 0
+    assert well_data_pd["row"][1] == 0
+    assert well_data_pd["column"][1] == 7
+    assert well_data_pd["q"][1] == -20.0
+    test_riv = test_mod.get_package("riv")
+    riv_data_pd = test_riv.stress_period_data.get_dataframe(0)
+    assert riv_data_pd.iloc[0, 0] == 0
+    assert riv_data_pd.iloc[0, 1] == 0
+    assert riv_data_pd.iloc[0, 2] == 9
+    assert riv_data_pd.iloc[0, 3] == 110
+    assert riv_data_pd.iloc[0, 4] == 90.0
+    assert riv_data_pd.iloc[0, 5] == 100.0
+    assert riv_data_pd.iloc[0, 6] == 1.0
+    assert riv_data_pd.iloc[0, 7] == 2.0
+    assert riv_data_pd.iloc[0, 8] == 3.0
+
+    well_data_array = test_wel.stress_period_data.to_array()
+    assert "q" in well_data_array
+    array = np.array([0.0, 0.0, 0.0, 0.0, -2000.0, 0.0, 0.0, -2.0, 0.0, 0.0])
+    array.reshape((1, 1, 10))
+    compare = well_data_array["q"] == array
+    assert compare.all()
+
+    well_data_record = test_wel.stress_period_data.get_record()
+    assert 0 in well_data_record
+    assert "binary" in well_data_record[0]
+    well_data_record[0]["binary"] = True
+    well_data_pd_0.iloc[0, 3] = -10000.0
+    well_data_record[0]["data"] = well_data_pd_0
+    test_wel.stress_period_data.set_record(well_data_record)
+
+    updated_record = test_wel.stress_period_data.get_record(data_frame=True)
+    assert 0 in updated_record
+    assert "binary" in updated_record[0]
+    assert updated_record[0]["binary"]
+    assert isinstance(updated_record[0]["data"], pandas.DataFrame)
+    assert updated_record[0]["data"]["q"][0] == -10000
+
+    record = [0, 0, 2, -111.0]
+    test_wel.stress_period_data.append_list_as_record(record, 0)
+
+    combined_data = test_wel.stress_period_data.get_dataframe(0)
+    assert len(combined_data.axes[0]) == 3
+    assert combined_data["q"][2] == -111.0
+
+    test_drn = test_mod.get_package("drn")
+    file_entry = test_drn.stress_period_data.get_file_entry()
+    assert file_entry.strip() == "1 1 1 80 drn_1"
diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 71d231da40..0bf3832f21 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -1817,7 +1817,7 @@ def test_array(function_tmpdir):
     drn_gd_1 = drn.stress_period_data.get_data(1)
     assert drn_gd_1 is None
     drn_gd_2 = drn.stress_period_data.get_data(2)
-    assert drn_gd_2 == []
+    assert len(drn_gd_2) == 0
     drn_gd_3 = drn.stress_period_data.get_data(3)
     assert drn_gd_3[0][1] == 55.0
@@ -1964,7 +1964,7 @@ def test_array(function_tmpdir):
     drn_gd_1 = drn.stress_period_data.get_data(1)
     assert drn_gd_1 is None
     drn_gd_2 = drn.stress_period_data.get_data(2)
-    assert drn_gd_2 == []
+    assert len(drn_gd_2) == 0
     drn_gd_3 = drn.stress_period_data.get_data(3)
     assert drn_gd_3[0][1] == 55.0
diff --git a/autotest/test_model_splitter.py b/autotest/test_model_splitter.py
index e743399d91..e28e809a47 100644
--- a/autotest/test_model_splitter.py
+++ b/autotest/test_model_splitter.py
@@ -344,19 +344,15 @@ def test_control_records(function_tmpdir):
            "Binary file input not being preserved for MFArray"
        )
 
-    spd_ls1 = ml1.wel.stress_period_data._data_storage[
-        1
-    ].layer_storage.multi_dim_list[0]
-    spd_ls2 = ml1.wel.stress_period_data._data_storage[
-        2
-    ].layer_storage.multi_dim_list[0]
-
-    if spd_ls1.data_storage_type.value != 3 or spd_ls1.binary:
+    spd_ls1 = ml1.wel.stress_period_data.get_record(1)
+    spd_ls2 = ml1.wel.stress_period_data.get_record(2)
+
+    if spd_ls1["filename"] is None or spd_ls1["binary"]:
        raise AssertionError(
            "External ascii files not being preserved for MFList"
        )
-    if spd_ls2.data_storage_type.value != 3 or not spd_ls2.binary:
+    if spd_ls2["filename"] is None or not spd_ls2["binary"]:
        raise AssertionError(
            "External binary file input not being preserved for MFList"
        )
diff --git a/flopy/mf6/data/dfn/gwf-chd.dfn
b/flopy/mf6/data/dfn/gwf-chd.dfn index 543433c1b3..6fbf9c1220 100644 --- a/flopy/mf6/data/dfn/gwf-chd.dfn +++ b/flopy/mf6/data/dfn/gwf-chd.dfn @@ -1,5 +1,6 @@ # --------------------- gwf chd options --------------------- # flopy multi-package +# package-type stress-package block options name auxiliary diff --git a/flopy/mf6/data/dfn/gwf-drn.dfn b/flopy/mf6/data/dfn/gwf-drn.dfn index c03a107800..a1ee3aeb0a 100644 --- a/flopy/mf6/data/dfn/gwf-drn.dfn +++ b/flopy/mf6/data/dfn/gwf-drn.dfn @@ -1,5 +1,6 @@ # --------------------- gwf drn options --------------------- # flopy multi-package +# package-type stress-package block options name auxiliary diff --git a/flopy/mf6/data/dfn/gwf-evt.dfn b/flopy/mf6/data/dfn/gwf-evt.dfn index b66f62301c..7073307a0e 100644 --- a/flopy/mf6/data/dfn/gwf-evt.dfn +++ b/flopy/mf6/data/dfn/gwf-evt.dfn @@ -1,5 +1,6 @@ # --------------------- gwf evt options --------------------- # flopy multi-package +# package-type stress-package block options name fixed_cell diff --git a/flopy/mf6/data/dfn/gwf-evta.dfn b/flopy/mf6/data/dfn/gwf-evta.dfn index 19ca3cec45..22e3060fad 100644 --- a/flopy/mf6/data/dfn/gwf-evta.dfn +++ b/flopy/mf6/data/dfn/gwf-evta.dfn @@ -1,5 +1,6 @@ # --------------------- gwf evta options --------------------- # flopy multi-package +# package-type stress-package block options name readasarrays diff --git a/flopy/mf6/data/dfn/gwf-ghb.dfn b/flopy/mf6/data/dfn/gwf-ghb.dfn index f84a370869..bab639ba85 100644 --- a/flopy/mf6/data/dfn/gwf-ghb.dfn +++ b/flopy/mf6/data/dfn/gwf-ghb.dfn @@ -1,5 +1,6 @@ # --------------------- gwf ghb options --------------------- # flopy multi-package +# package-type stress-package block options name auxiliary diff --git a/flopy/mf6/data/dfn/gwf-lak.dfn b/flopy/mf6/data/dfn/gwf-lak.dfn index d73c4d13b7..3dc9e940c0 100644 --- a/flopy/mf6/data/dfn/gwf-lak.dfn +++ b/flopy/mf6/data/dfn/gwf-lak.dfn @@ -1,5 +1,6 @@ # --------------------- gwf lak options --------------------- # flopy multi-package +# package-type advanced-stress-package block options name auxiliary diff --git a/flopy/mf6/data/dfn/gwf-maw.dfn b/flopy/mf6/data/dfn/gwf-maw.dfn index d9bf3912e4..4a7784baf3 100644 --- a/flopy/mf6/data/dfn/gwf-maw.dfn +++ b/flopy/mf6/data/dfn/gwf-maw.dfn @@ -1,5 +1,6 @@ # --------------------- gwf maw options --------------------- # flopy multi-package +# package-type advanced-stress-package block options name auxiliary diff --git a/flopy/mf6/data/dfn/gwf-rch.dfn b/flopy/mf6/data/dfn/gwf-rch.dfn index 0a36ae078b..d452cdead4 100644 --- a/flopy/mf6/data/dfn/gwf-rch.dfn +++ b/flopy/mf6/data/dfn/gwf-rch.dfn @@ -1,5 +1,6 @@ # --------------------- gwf rch options --------------------- # flopy multi-package +# package-type stress-package block options name fixed_cell diff --git a/flopy/mf6/data/dfn/gwf-rcha.dfn b/flopy/mf6/data/dfn/gwf-rcha.dfn index bc5874ef59..b38ad9148a 100644 --- a/flopy/mf6/data/dfn/gwf-rcha.dfn +++ b/flopy/mf6/data/dfn/gwf-rcha.dfn @@ -1,5 +1,6 @@ # --------------------- gwf rcha options --------------------- # flopy multi-package +# package-type stress-package block options name readasarrays diff --git a/flopy/mf6/data/dfn/gwf-riv.dfn b/flopy/mf6/data/dfn/gwf-riv.dfn index a57b653cc0..52105038b2 100644 --- a/flopy/mf6/data/dfn/gwf-riv.dfn +++ b/flopy/mf6/data/dfn/gwf-riv.dfn @@ -1,5 +1,6 @@ # --------------------- gwf riv options --------------------- # flopy multi-package +# package-type stress-package block options name auxiliary diff --git a/flopy/mf6/data/dfn/gwf-sfr.dfn b/flopy/mf6/data/dfn/gwf-sfr.dfn index 
40048aeee5..24da890760 100644 --- a/flopy/mf6/data/dfn/gwf-sfr.dfn +++ b/flopy/mf6/data/dfn/gwf-sfr.dfn @@ -1,5 +1,6 @@ # --------------------- gwf sfr options --------------------- # flopy multi-package +# package-type advanced-stress-package block options name auxiliary diff --git a/flopy/mf6/data/dfn/gwf-uzf.dfn b/flopy/mf6/data/dfn/gwf-uzf.dfn index 1b61e1fb6b..fdf5078eba 100644 --- a/flopy/mf6/data/dfn/gwf-uzf.dfn +++ b/flopy/mf6/data/dfn/gwf-uzf.dfn @@ -1,5 +1,6 @@ # --------------------- gwf uzf options --------------------- # flopy multi-package +# package-type advanced-stress-package block options name auxiliary diff --git a/flopy/mf6/data/dfn/gwf-wel.dfn b/flopy/mf6/data/dfn/gwf-wel.dfn index 4f7710ac5d..56a1293f9c 100644 --- a/flopy/mf6/data/dfn/gwf-wel.dfn +++ b/flopy/mf6/data/dfn/gwf-wel.dfn @@ -1,5 +1,6 @@ # --------------------- gwf wel options --------------------- # flopy multi-package +# package-type stress-package block options name auxiliary diff --git a/flopy/mf6/data/mfdata.py b/flopy/mf6/data/mfdata.py index f4ea5ee043..2aafe39f07 100644 --- a/flopy/mf6/data/mfdata.py +++ b/flopy/mf6/data/mfdata.py @@ -95,9 +95,6 @@ def update_transient_key(self, old_transient_key, new_transient_key): # update current key self._current_key = new_transient_key - def _transient_setup(self, data_storage): - self._data_storage = data_storage - def get_data_prep(self, transient_key=0): if isinstance(transient_key, int): self._verify_sp(transient_key) @@ -596,26 +593,43 @@ def _get_external_formatting_string(self, layer, ext_file_action): else: layer_storage = storage.layer_storage[layer] # resolve external file path + ret, fname = self._get_external_formatting_str( + layer_storage.fname, + layer_storage.factor, + layer_storage.binary, + layer_storage.iprn, + storage.data_structure_type, + ext_file_action, + ) + layer_storage.fname = fname + return ret + + def _get_external_formatting_str( + self, fname, factor, binary, iprn, data_type, ext_file_action + ): file_mgmt = self._simulation_data.mfpath model_name = self._data_dimensions.package_dim.model_dim[0].model_name ext_file_path = file_mgmt.get_updated_path( - layer_storage.fname, model_name, ext_file_action + fname, model_name, ext_file_action ) - layer_storage.fname = datautil.clean_filename(ext_file_path) + fname = datautil.clean_filename(ext_file_path) ext_format = ["OPEN/CLOSE", f"'{ext_file_path}'"] - if storage.data_structure_type != DataStructureType.recarray: - if layer_storage.factor is not None: + if data_type != DataStructureType.recarray: + if factor is not None: data_type = self.structure.get_datum_type( return_enum_type=True ) ext_format.append("FACTOR") if data_type == DatumType.integer: - ext_format.append(str(int(layer_storage.factor))) + ext_format.append(str(int(factor))) else: - ext_format.append(str(layer_storage.factor)) - if layer_storage.binary: + ext_format.append(str(factor)) + if binary: ext_format.append("(BINARY)") - if layer_storage.iprn is not None: + if iprn is not None: ext_format.append("IPRN") - ext_format.append(str(layer_storage.iprn)) - return f"{self._simulation_data.indent_string.join(ext_format)}\n" + ext_format.append(str(iprn)) + return ( + f"{self._simulation_data.indent_string.join(ext_format)}\n", + fname, + ) diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py index f54eec2901..51f47bf0c3 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -1619,7 +1619,6 @@ def __init__( dimensions=dimensions, block=block, ) - 
self._transient_setup(self._data_storage) self.repeating = True @property diff --git a/flopy/mf6/data/mfdatalist.py b/flopy/mf6/data/mfdatalist.py index d622a2500d..efb545bc5d 100644 --- a/flopy/mf6/data/mfdatalist.py +++ b/flopy/mf6/data/mfdatalist.py @@ -12,7 +12,7 @@ from ..mfbase import ExtFileAction, MFDataException, VerbosityLevel from ..utils.mfenums import DiscretizationType from .mfdatastorage import DataStorage, DataStorageType, DataStructureType -from .mfdatautil import to_string +from .mfdatautil import list_to_array, to_string from .mffileaccess import MFFileAccessList from .mfstructure import DatumType, MFDataStructure @@ -30,7 +30,7 @@ class MFList(mfdata.MFMultiDimVar, DataListInterface): data contained in the simulation structure : MFDataStructure describes the structure of the data - data : list or ndarray + data : list or ndarray or None actual data enable : bool enable/disable the array @@ -141,66 +141,9 @@ def to_array(self, kper=0, mask=False): Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. The dictionary keys are the MFDataList dtype names for the stress period data.""" - i0 = 1 sarr = self.get_data(key=kper) - if not isinstance(sarr, list): - sarr = [sarr] - if len(sarr) == 0 or sarr[0] is None: - return None - if "inode" in sarr[0].dtype.names: - raise NotImplementedError() - arrays = {} model_grid = self._data_dimensions.get_model_grid() - - if model_grid._grid_type.value == 1: - shape = ( - model_grid.num_layers(), - model_grid.num_rows(), - model_grid.num_columns(), - ) - elif model_grid._grid_type.value == 2: - shape = ( - model_grid.num_layers(), - model_grid.num_cells_per_layer(), - ) - else: - shape = (model_grid.num_cells_per_layer(),) - - for name in sarr[0].dtype.names[i0:]: - if not sarr[0].dtype.fields[name][0] == object: - arr = np.zeros(shape) - arrays[name] = arr.copy() - - if np.isscalar(sarr[0]): - # if there are no entries for this kper - if sarr[0] == 0: - if mask: - for name, arr in arrays.items(): - arrays[name][:] = np.NaN - return arrays - else: - raise Exception("MfList: something bad happened") - - for name, arr in arrays.items(): - cnt = np.zeros(shape, dtype=np.float64) - for sp_rec in sarr: - if sp_rec is not None: - for rec in sp_rec: - arr[rec["cellid"]] += rec[name] - cnt[rec["cellid"]] += 1.0 - # average keys that should not be added - if name != "cond" and name != "flux": - idx = cnt > 0.0 - arr[idx] /= cnt[idx] - if mask: - arr = np.ma.masked_where(cnt == 0.0, arr) - arr[cnt == 0.0] = np.NaN - - arrays[name] = arr.copy() - # elif mask: - # for name, arr in arrays.items(): - # arrays[name][:] = np.NaN - return arrays + return list_to_array(sarr, model_grid, kper, mask) def new_simulation(self, sim_data): """Initialize MFList object for a new simulation. @@ -1396,7 +1339,7 @@ def load( Parameters ---------- - first_line : str + first_line : str, None A string containing the first line of data in this list. 
        file_handle : file descriptor
            A file handle for the data file which points to the second
@@ -1578,7 +1521,6 @@ def __init__(
            package=package,
            block=block,
        )
-        self._transient_setup(self._data_storage)
        self.repeating = True
 
    @property
diff --git a/flopy/mf6/data/mfdataplist.py b/flopy/mf6/data/mfdataplist.py
new file mode 100644
index 0000000000..b0d921bf9d
--- /dev/null
+++ b/flopy/mf6/data/mfdataplist.py
@@ -0,0 +1,2499 @@
+import copy
+import inspect
+import io
+import os
+import sys
+
+import numpy as np
+import pandas
+
+from ...datbase import DataListInterface, DataType
+from ...discretization.structuredgrid import StructuredGrid
+from ...discretization.unstructuredgrid import UnstructuredGrid
+from ...discretization.vertexgrid import VertexGrid
+from ...utils import datautil
+from ..data import mfdata
+from ..mfbase import ExtFileAction, MFDataException, VerbosityLevel
+from ..utils.mfenums import DiscretizationType
+from .mfdatalist import MFList
+from .mfdatastorage import DataStorageType, DataStructureType
+from .mfdatautil import list_to_array, process_open_close_line
+from .mffileaccess import MFFileAccessList
+from .mfstructure import DatumType, MFDataStructure
+
+
+class PandasListStorage:
+    """
+    Contains data storage information for a single list.
+
+    Attributes
+    ----------
+    internal_data : ndarray or recarray
+        data being stored, if full data is being stored internally in memory
+    data_storage_type : DataStorageType
+        method used to store the data
+    fname : str
+        file name of external file containing the data
+    factor : int/float
+        factor to multiply the data by
+    iprn : int
+        print code
+    binary : bool
+        whether the data is stored in a binary file
+    modified : bool
+        whether data in storage has been modified since last write
+
+    Methods
+    -------
+    get_record : dict
+        returns a dictionary with all data storage information
+    set_record(rec)
+        sets data storage information based on the dictionary "rec"
+    set_internal(internal_data)
+        make data storage internal, using "internal_data" as the data
+    set_external(fname, data)
+        make data storage external, with file "fname" and external data "data"
+    internal_size : int
+        size of the internal data
+    has_data : bool
+        whether or not data exists
+    """
+
+    def __init__(self):
+        self.internal_data = None
+        self.fname = None
+        self.iprn = None
+        self.binary = False
+        self.data_storage_type = None
+        self.modified = False
+
+    def get_record(self):
+        rec = {}
+        if self.internal_data is not None:
+            rec["data"] = copy.deepcopy(self.internal_data)
+        if self.fname is not None:
+            rec["filename"] = self.fname
+        if self.iprn is not None:
+            rec["iprn"] = self.iprn
+        if self.binary is not None:
+            rec["binary"] = self.binary
+        return rec
+
+    def set_record(self, rec):
+        if "data" in rec:
+            self.internal_data = rec["data"]
+        if "filename" in rec:
+            self.fname = rec["filename"]
+        if "iprn" in rec:
+            self.iprn = rec["iprn"]
+        if "binary" in rec:
+            self.binary = rec["binary"]
+
+    def set_internal(self, internal_data):
+        self.data_storage_type = DataStorageType.internal_array
+        self.internal_data = internal_data
+        self.fname = None
+        self.binary = False
+
+    def set_external(self, fname, data=None):
+        self.data_storage_type = DataStorageType.external_file
+        self.internal_data = data
+        self.fname = fname
+
+    @property
+    def internal_size(self):
+        if not isinstance(self.internal_data, pandas.DataFrame):
+            return 0
+        else:
+            return len(self.internal_data)
+
+    def has_data(self):
+        if self.data_storage_type == DataStorageType.internal_array:
+            return self.internal_data is not None
+        else:
+            return self.fname is not None
+
+
+class MFPandasList(mfdata.MFMultiDimVar, DataListInterface):
+    """
+    Provides an interface for the user to access and update MODFLOW
+    list data using Pandas.  MFPandasList objects are not designed to be
+    directly constructed by the end user.  When a flopy for MODFLOW 6 package
+    object is constructed, the appropriate MFList objects are automatically
+    built.
+
+    Parameters
+    ----------
+    sim_data : MFSimulationData
+        data contained in the simulation
+    model_or_sim : MFSimulation or MFModel
+        parent model, or if not part of a model, parent simulation
+    structure : MFDataStructure
+        describes the structure of the data
+    data : list or ndarray or None
+        actual data
+    enable : bool
+        enable/disable the array
+    path : tuple
+        path in the data dictionary to this MFPandasList
+    dimensions : MFDataDimensions
+        dimension information related to the model, package, and array
+    package : MFPackage
+        parent package
+    block : MFBlock
+        parent block
+    """
+
+    def __init__(
+        self,
+        sim_data,
+        model_or_sim,
+        structure,
+        data=None,
+        enable=None,
+        path=None,
+        dimensions=None,
+        package=None,
+        block=None,
+    ):
+        super().__init__(
+            sim_data, model_or_sim, structure, enable, path, dimensions
+        )
+        self._data_storage = self._new_storage()
+        self._package = package
+        self._block = block
+        self._last_line_info = []
+        self._data_line = None
+        self._temp_dict = {}
+        self._crnt_line_num = 1
+        self._data_header = None
+        self._header_names = None
+        self._data_types = None
+        self._data_item_names = None
+        self._mg = None
+        self._current_key = 0
+        self._max_file_size = 1000000000000000
+        if self._model_or_sim.type == "Model":
+            self._mg = self._model_or_sim.modelgrid
+
+        if data is not None:
+            try:
+                self.set_data(data, True)
+            except Exception as ex:
+                type_, value_, traceback_ = sys.exc_info()
+                raise MFDataException(
+                    structure.get_model(),
+                    structure.get_package(),
+                    path,
+                    "setting data",
+                    structure.name,
+                    inspect.stack()[0][3],
+                    type_,
+                    value_,
+                    traceback_,
+                    None,
+                    sim_data.debug,
+                    ex,
+                )
+
+    @property
+    def data_type(self):
+        """Type of data (DataType) stored in the list"""
+        return DataType.list
+
+    @property
+    def package(self):
+        """Package object that this data belongs to."""
+        return self._package
+
+    @property
+    def dtype(self):
+        """Type of data (numpy.dtype) stored in the list"""
+        return self.get_dataframe().dtypes
+
+    def _append_type_list(self, data_name, data_type, include_header=False):
+        if include_header:
+            self._data_header[data_name] = data_type
+        self._header_names.append(data_name)
+        self._data_types.append(data_type)
+
+    def _process_open_close_line(self, arr_line, store=True):
+        """
+        Process open/close line extracting the multiplier, print format,
+        binary flag, data file path, and any comments
+        """
+        data_dim = self._data_dimensions
+        (
+            multiplier,
+            print_format,
+            binary,
+            data_file,
+            data,
+            comment,
+        ) = process_open_close_line(
+            arr_line,
+            data_dim,
+            self._data_type,
+            self._simulation_data.debug,
+            store,
+        )
+        # add to active list of external files
+        model_name = data_dim.package_dim.model_dim[0].model_name
+        self._simulation_data.mfpath.add_ext_file(data_file, model_name)
+
+        return data, multiplier, print_format, binary, data_file
+
+    def _add_cellid_fields(self, data, keep_existing=False):
+        """
+        Add cellid fields to a Pandas DataFrame and drop the layer,
+        row, column, cell, and node fields that the cellid is based on
+        """
+        for data_item in self.structure.data_item_structures:
+            if data_item.type == DatumType.integer:
+                if data_item.name.lower() == "cellid":
+                    columns = data.columns.tolist()
+                    if isinstance(self._mg, StructuredGrid):
+                        if (
+                            "layer" in columns
+                            and "row" in columns
+                            and "column" in columns
+                        ):
+                            data["cellid"] = data[
+                                ["layer", "row", "column"]
+                            ].apply(tuple, axis=1)
+                            if not keep_existing:
+                                data = data.drop(
+                                    columns=["layer", "row", "column"]
+                                )
+                    elif isinstance(self._mg, VertexGrid):
+                        cell_2 = None
+                        if "cell" in columns:
+                            cell_2 = "cell"
+                        elif "ncpl" in columns:
+                            cell_2 = "ncpl"
+                        if cell_2 is not None and "layer" in columns:
+                            data["cellid"] = data[["layer", cell_2]].apply(
+                                tuple, axis=1
+                            )
+                            if not keep_existing:
+                                data = data.drop(columns=["layer", cell_2])
+                    elif isinstance(self._mg, UnstructuredGrid):
+                        if "node" in columns:
+                            data["cellid"] = data[["node"]].apply(
+                                tuple, axis=1
+                            )
+                            if not keep_existing:
+                                data = data.drop(columns=["node"])
+                    else:
+                        raise MFDataException(
+                            "ERROR: Unrecognized model grid "
+                            f"{self._mg} not supported by MFPandasList"
+                        )
+                    # reorder columns
+                    column_headers = data.columns.tolist()
+                    column_headers.insert(0, column_headers.pop())
+                    data = data[column_headers]
+
+        return data
+
+    def _remove_cellid_fields(self, data):
+        """remove cellid fields from data"""
+        for data_item in self.structure.data_item_structures:
+            if data_item.type == DatumType.integer:
+                if data_item.name.lower() == "cellid":
+                    # if there is a cellid field, remove it
+                    if "cellid" in data.columns:
+                        return data.drop("cellid", axis=1)
+        return data
+
+    def _get_cellid_size(self, data_item_name):
+        """get the number of spatial coordinates used in the cellid"""
+        model_num = datautil.DatumUtil.cellid_model_num(
+            data_item_name,
+            self._data_dimensions.structure.model_data,
+            self._data_dimensions.package_dim.model_dim,
+        )
+        model_grid = self._data_dimensions.get_model_grid(model_num=model_num)
+        return model_grid.get_num_spatial_coordinates()
+
+    def _build_data_header(self):
+        """
+        Constructs lists of data column header names and data column types
+        based on data structure information, boundname and aux information,
+        and model discretization type.
+        """
+        # initialize
+        self._data_header = {}
+        self._header_names = []
+        self._data_types = []
+        self._data_item_names = []
+        s_type = pandas.StringDtype
+        f_type = np.float64
+        i_type = np.int64
+        data_dim = self._data_dimensions
+        # loop through data structure definition information
+        for data_item, index in zip(
+            self.structure.data_item_structures,
+            range(0, len(self.structure.data_item_structures)),
+        ):
+            if data_item.name.lower() == "aux":
+                # get all of the aux variables for this dataset
+                aux_var_names = data_dim.package_dim.get_aux_variables()
+                if aux_var_names is not None:
+                    for aux_var_name in aux_var_names[0]:
+                        if aux_var_name.lower() != "auxiliary":
+                            self._append_type_list(aux_var_name, f_type)
+                            self._data_item_names.append(aux_var_name)
+            elif data_item.name.lower() == "boundname":
+                # see if boundnames is enabled for this dataset
+                if data_dim.package_dim.boundnames():
+                    self._append_type_list("boundname", s_type)
+                    self._data_item_names.append("boundname")
+            else:
+                if data_item.type == DatumType.keyword:
+                    self._append_type_list(data_item.name, s_type)
+                elif data_item.type == DatumType.string:
+                    self._append_type_list(data_item.name, s_type)
+                elif data_item.type == DatumType.integer:
+                    if data_item.name.lower() == "cellid":
+                        # get the appropriate cellid column headings for the
+                        # model's discretization type
+                        if isinstance(self._mg, StructuredGrid):
+                            self._append_type_list("layer", i_type, True)
+                            self._append_type_list("row", i_type, True)
+                            self._append_type_list("column", i_type, True)
+                        elif isinstance(self._mg, VertexGrid):
+                            self._append_type_list("layer", i_type, True)
+                            self._append_type_list("cell", i_type, True)
+                        elif isinstance(self._mg, UnstructuredGrid):
+                            self._append_type_list("node", i_type, True)
+                        else:
+                            raise MFDataException(
+                                "ERROR: Unrecognized model grid "
+                                f"{self._mg} not supported by MFPandasList"
+                            )
+                    else:
+                        self._append_type_list(data_item.name, i_type)
+                elif data_item.type == DatumType.double_precision:
+                    self._append_type_list(data_item.name, f_type)
+                else:
+                    self._data_header = None
+                    self._header_names = None
+                self._data_item_names.append(data_item.name)
+
+    @staticmethod
+    def _unique_column_name(data, col_base_name):
+        """generate a unique column name based on "col_base_name" """
+        col_name = col_base_name
+        idx = 2
+        while col_name in data:
+            col_name = f"{col_base_name}_{idx}"
+            idx += 1
+        return col_name
+
+    def _untuple_cellids(self, pdata):
+        """
+        For all cellids in "pdata", convert them to layer, row, column fields
+        and then drop the cellids from "pdata". Returns the updated "pdata".
+ """ + if pdata is None or len(pdata) == 0: + return pdata, 0 + fields_to_correct = [] + data_idx = 0 + # find cellid columns that need to be fixed + columns = pdata.columns + for data_item in self.structure.data_item_structures: + if data_idx >= len(columns) + 1: + break + if ( + data_item.type == DatumType.integer + and data_item.name.lower() == "cellid" + ): + if isinstance(pdata.iloc[0, data_idx], tuple): + fields_to_correct.append((data_idx, columns[data_idx])) + data_idx += 1 + else: + data_idx += self._get_cellid_size(data_item.name) + else: + data_idx += 1 + + # fix columns + for field_idx, column_name in fields_to_correct: + # add individual layer/row/column/cell/node columns + if isinstance(self._mg, StructuredGrid): + pdata.insert( + loc=field_idx, + column=self._unique_column_name(pdata, "layer"), + value=pdata.apply(lambda x: x[column_name][0], axis=1), + ) + pdata.insert( + loc=field_idx + 1, + column=self._unique_column_name(pdata, "row"), + value=pdata.apply(lambda x: x[column_name][1], axis=1), + ) + pdata.insert( + loc=field_idx + 2, + column=self._unique_column_name(pdata, "column"), + value=pdata.apply(lambda x: x[column_name][2], axis=1), + ) + elif isinstance(self._mg, VertexGrid): + pdata.insert( + loc=field_idx, + column=self._unique_column_name(pdata, "layer"), + value=pdata.apply(lambda x: x[column_name][0], axis=1), + ) + pdata.insert( + loc=field_idx + 1, + column=self._unique_column_name(pdata, "cell"), + value=pdata.apply(lambda x: x[column_name][1], axis=1), + ) + elif isinstance(self._mg, UnstructuredGrid): + if column_name == "node": + # fixing a problem where node was specified as a tuple + # make sure new column is named properly + column_name = "node_2" + pdata = pdata.rename(columns={"node": column_name}) + pdata.insert( + loc=field_idx, + column=self._unique_column_name(pdata, "node"), + value=pdata.apply(lambda x: x[column_name][0], axis=1), + ) + # remove cellid tuple + pdata = pdata.drop(column_name, axis=1) + return pdata, len(fields_to_correct) + + def _resolve_columns(self, data): + """resolve the column headings for a specific dataset provided""" + if len(data) == 0: + return self._header_names, False + if len(data[0]) == len(self._header_names) or len(data[0]) == 0: + return self._header_names, False + + if len(data[0]) == len(self._data_item_names): + return self._data_item_names, True + + if ( + len(data[0]) == len(self._data_item_names) - 1 + and self._data_item_names[-1] == "boundname" + ): + return self._data_item_names[:-1], True + + return None, None + + def _untuple_recarray(self, rec): + rec_list = rec.tolist() + for row, line in enumerate(rec_list): + for column, data in enumerate(line): + if isinstance(data, tuple) and len(data) == 1: + line_lst = list(line) + line_lst[column] = data[0] + rec_list[row] = tuple(line_lst) + return rec_list + + def set_data(self, data, autofill=False, check_data=True, append=False): + """Sets the contents of the data to "data". Data can have the + following formats: + 1) recarray - recarray containing the datalist + 2) [(line_one), (line_two), ...] 
- list where each line of the
+               datalist is a tuple within the list
+        If the data is transient, a dictionary can be used to specify each
+        stress period where the dictionary key is <stress period> - 1 and
+        the dictionary value is the datalist data defined above:
+        {0:ndarray, 1:[(line_one), (line_two), ...], 2:{'filename':filename}}
+
+        Parameters
+        ----------
+        data : ndarray/list/dict
+            Data to set
+        autofill : bool
+            Automatically correct data
+        check_data : bool
+            Whether to verify the data
+        append : bool
+            Append to existing data
+
+        """
+        # (re)build data header
+        self._build_data_header()
+        if isinstance(data, dict) and not self.has_data():
+            MFPandasList.set_record(self, data)
+            return
+        if isinstance(data, np.recarray):
+            # verify the shape of the data (recarray)
+            if len(data) == 0:
+                # create empty dataset
+                data = pandas.DataFrame(columns=self._header_names)
+            elif len(data[0]) != len(self._header_names):
+                if len(data[0]) == len(self._data_item_names):
+                    # data most likely being stored with cellids as tuples,
+                    # create a dataframe and untuple the cellids
+                    data = pandas.DataFrame(
+                        data, columns=self._data_item_names
+                    )
+                    data = self._untuple_cellids(data)[0]
+                    # make sure columns are still in correct order
+                    data = pandas.DataFrame(data, columns=self._header_names)
+                else:
+                    raise MFDataException(
+                        f"ERROR: Data list {self._data_name} supplied the "
+                        f"wrong number of columns of data, expected "
+                        f"{len(self._data_item_names)} got {len(data[0])}."
+                    )
+            else:
+                # data size matches the expected header names, create a pandas
+                # dataframe from the data
+                data_new = pandas.DataFrame(data, columns=self._header_names)
+                if not self._dataframe_check(data_new):
+                    data_list = self._untuple_recarray(data)
+                    data = pandas.DataFrame(
+                        data_list, columns=self._header_names
+                    )
+                else:
+                    data, count = self._untuple_cellids(data_new)
+                    if count > 0:
+                        # make sure columns are still in correct order
+                        data = pandas.DataFrame(
+                            data, columns=self._header_names
+                        )
+        elif isinstance(data, list) or isinstance(data, tuple):
+            if not (isinstance(data[0], list) or isinstance(data[0], tuple)):
+                # get data in the format of a tuple of lists (or tuples)
+                data = [data]
+            # resolve the data's column headings
+            columns = self._resolve_columns(data)[0]
+            if columns is None:
+                message = (
+                    f"ERROR: Data list {self._data_name} supplied the "
+                    f"wrong number of columns of data, expected "
+                    f"{len(self._data_item_names)} got {len(data[0])}."
+ ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self._data_dimensions.structure.get_model(), + self._data_dimensions.structure.get_package(), + self._data_dimensions.structure.path, + "setting list data", + self._data_dimensions.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self._simulation_data.debug, + ) + + if len(data[0]) == 0: + # create empty dataset + data = pandas.DataFrame(columns=columns) + else: + # create dataset + data = pandas.DataFrame(data, columns=columns) + if ( + self._data_item_names[-1] == "boundname" + and "boundname" not in columns + ): + # add empty boundname column + data["boundname"] = "" + # get rid of tuples from cellids + data, count = self._untuple_cellids(data) + if count > 0: + # make sure columns are still in correct order + data = pandas.DataFrame(data, columns=self._header_names) + elif isinstance(data, pandas.DataFrame): + if len(data.columns) != len(self._header_names): + message = ( + f"ERROR: Data list {self._data_name} supplied the " + f"wrong number of columns of data, expected " + f"{len(self._data_item_names)} got {len(data[0])}.\n" + f"Data columns supplied: {data.columns}\n" + f"Data columns expected: {self._header_names}" + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self._data_dimensions.structure.get_model(), + self._data_dimensions.structure.get_package(), + self._data_dimensions.structure.path, + "setting list data", + self._data_dimensions.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self._simulation_data.debug, + ) + # set correct data header names + data = data.set_axis(self._header_names, axis=1) + else: + message = ( + f"ERROR: Data list {self._data_name} is an unsupported type: " + f"{type(data)}." + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self._data_dimensions.structure.get_model(), + self._data_dimensions.structure.get_package(), + self._data_dimensions.structure.path, + "setting list data", + self._data_dimensions.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self._simulation_data.debug, + ) + + data_storage = self._get_storage() + if append: + # append data to existing dataframe + current_data = self._get_dataframe() + if current_data is not None: + data = pandas.concat([current_data, data]) + if data_storage.data_storage_type == DataStorageType.external_file: + # store external data until next write + data_storage.internal_data = data + else: + # store data internally + data_storage.set_internal(data) + data_storage.modified = True + + def has_modified_ext_data(self): + """check to see if external data has been modified since last read""" + data_storage = self._get_storage() + return ( + data_storage.data_storage_type == DataStorageType.external_file + and data_storage.internal_data is not None + ) + + def binary_ext_data(self): + """check for binary data""" + data_storage = self._get_storage() + return data_storage.binary + + def to_array(self, kper=0, mask=False): + """Convert stress period boundary condition (MFDataList) data for a + specified stress period to a 3-D numpy array. + + Parameters + ---------- + kper : int + MODFLOW zero-based stress period number to return (default is + zero) + mask : bool + return array with np.NaN instead of zero + + Returns + ---------- + out : dict of numpy.ndarrays + Dictionary of 3-D numpy arrays containing the stress period data + for a selected stress period. 
The dictionary keys are the
+            MFDataList dtype names for the stress period data."""
+        sarr = self.get_data(key=kper)
+        model_grid = self._data_dimensions.get_model_grid()
+        return list_to_array(sarr, model_grid, kper, mask)
+
+    def set_record(self, record, autofill=False, check_data=True):
+        """Sets the contents of the data and metadata to "record".
+        "record" is a dictionary which has the following format:
+        {'filename':filename, 'binary':True/False, 'data':data}
+        To store to file include 'filename' in the dictionary.
+
+        Parameters
+        ----------
+        record : ndarray/list/dict
+            Data and metadata to set
+        autofill : bool
+            Automatically correct data
+        check_data : bool
+            Whether to verify the data
+
+        """
+        if isinstance(record, dict):
+            data_storage = self._get_storage()
+            if "filename" in record:
+                data_storage.set_external(record["filename"])
+                if "binary" in record:
+                    if (
+                        record["binary"]
+                        and self._data_dimensions.package_dim.boundnames()
+                    ):
+                        message = (
+                            "Unable to store list data ({}) to a binary "
+                            "file when using boundnames"
+                            ".".format(self._data_dimensions.structure.name)
+                        )
+                        type_, value_, traceback_ = sys.exc_info()
+                        raise MFDataException(
+                            self._data_dimensions.structure.get_model(),
+                            self._data_dimensions.structure.get_package(),
+                            self._data_dimensions.structure.path,
+                            "writing list data to binary file",
+                            self._data_dimensions.structure.name,
+                            inspect.stack()[0][3],
+                            type_,
+                            value_,
+                            traceback_,
+                            message,
+                            self._simulation_data.debug,
+                        )
+                    data_storage.binary = record["binary"]
+                if "data" in record:
+                    # data gets written out to file
+                    MFPandasList.set_data(self, record["data"])
+                    # get file path
+                    fd_file_path = self._get_file_path()
+                    # make sure folder exists
+                    folder_path = os.path.split(fd_file_path)[0]
+                    if not os.path.exists(folder_path):
+                        os.makedirs(folder_path)
+                    # store data
+                    self._write_file_entry(fd_file_path)
+            else:
+                if "data" in record:
+                    data_storage.modified = True
+                    data_storage.set_internal(None)
+                    MFPandasList.set_data(self, record["data"])
+            if "iprn" in record:
+                data_storage.iprn = record["iprn"]
+
+    def append_data(self, data):
+        """Appends "data" to the end of this list.  Assumes data is in a format
+        that can be appended directly to a pandas dataframe.
+
+        Parameters
+        ----------
+        data : list(tuple)
+            Data to append.
+
+        """
+        try:
+            self._resync()
+            if self._get_storage() is None:
+                self._data_storage = self._new_storage()
+            data_storage = self._get_storage()
+            if (
+                data_storage.data_storage_type
+                == DataStorageType.internal_array
+            ):
+                # update internal data
+                MFPandasList.set_data(self, data, append=True)
+            elif (
+                data_storage.data_storage_type == DataStorageType.external_file
+            ):
+                # get external data from file
+                external_data = self._get_dataframe()
+                if isinstance(data, list):
+                    # build dataframe
+                    data = pandas.DataFrame(
+                        data, columns=external_data.columns
+                    )
+                # concatenate
+                data = pandas.concat([external_data, data])
+                # store
+                ext_record = self._get_record()
+                ext_record["data"] = data
+                MFPandasList.set_record(self, ext_record)
+        except Exception as ex:
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(
+                self.structure.get_model(),
+                self.structure.get_package(),
+                self._path,
+                "appending data",
+                self.structure.name,
+                inspect.stack()[0][3],
+                type_,
+                value_,
+                traceback_,
+                None,
+                self._simulation_data.debug,
+                ex,
+            )
+
+    def append_list_as_record(self, record):
+        """Appends the list `record` as a single record in this list's
+        dataframe.  Assumes "record" has the correct dimensions.
+
+        Parameters
+        ----------
+        record : list
+            List to be appended as a single record to the data's existing
+            dataframe.
+
+        """
+        self._resync()
+        try:
+            # store
+            self.append_data([record])
+        except Exception as ex:
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(
+                self.structure.get_model(),
+                self.structure.get_package(),
+                self._path,
+                "appending data",
+                self.structure.name,
+                inspect.stack()[0][3],
+                type_,
+                value_,
+                traceback_,
+                None,
+                self._simulation_data.debug,
+                ex,
+            )
+
+    def update_record(self, record, key_index):
+        """Updates a record at index "key_index" with the contents of "record".
+        If the index does not exist update_record appends the contents of
+        "record" to this list's recarray.
+
+        Parameters
+        ----------
+        record : list
+            New record to update data with
+        key_index : int
+            Stress period key of record to update. Only used in transient
+            data types.
+        """
+        self.append_list_as_record(record)
+
+    def store_internal(
+        self,
+        check_data=True,
+    ):
+        """Store all data internally.
+
+        Parameters
+        ----------
+        check_data : bool
+            Verify data prior to storing
+
+        """
+        storage = self._get_storage()
+        # check if data is already stored externally
+        if (
+            storage is None
+            or storage.data_storage_type == DataStorageType.external_file
+        ):
+            data = self._get_dataframe()
+            # if not empty dataset
+            if data is not None:
+                if (
+                    self._simulation_data.verbosity_level.value
+                    >= VerbosityLevel.verbose.value
+                ):
+                    print(f"Storing {self.structure.name} internally...")
+                internal_data = {
+                    "data": data,
+                }
+                MFPandasList.set_record(
+                    self, internal_data, check_data=check_data
+                )
+
+    def store_as_external_file(
+        self,
+        external_file_path,
+        binary=False,
+        replace_existing_external=True,
+        check_data=True,
+    ):
+        """Store all data externally in file external_file_path. The binary
+        option allows storage in a binary file. If replace_existing_external
+        is set to False, this method will not do anything if the data is
+        already in an external file.
+
+        Parameters
+        ----------
+        external_file_path : str
+            Path to external file
+        binary : bool
+            Store data in a binary file
+        replace_existing_external : bool
+            Whether to replace an existing external file.
+        check_data : bool
+            Verify data prior to storing
+
+        """
+        # only store data externally (do not store subpackage info)
+        if self.structure.construct_package is None:
+            storage = self._get_storage()
+            # check if data is already stored externally
+            if (
+                replace_existing_external
+                or storage is None
+                or storage.data_storage_type == DataStorageType.internal_array
+                or storage.data_storage_type
+                == DataStorageType.internal_constant
+            ):
+                data = self._get_dataframe()
+                # if not empty dataset
+                if data is not None:
+                    if (
+                        self._simulation_data.verbosity_level.value
+                        >= VerbosityLevel.verbose.value
+                    ):
+                        print(
+                            "Storing {} to external file {}.."
+ ".".format(self.structure.name, external_file_path) + ) + external_data = { + "filename": external_file_path, + "data": data, + "binary": binary, + } + MFPandasList.set_record( + self, external_data, check_data=check_data + ) + + def external_file_name(self): + """Returns external file name, or None if this is not external data.""" + storage = self._get_storage() + if storage is None: + return None + if ( + storage.data_storage_type == DataStorageType.external_file + and storage.fname is not None + and storage.fname != "" + ): + return storage.fname + return None + + @staticmethod + def _file_data_to_memory(fd_data_file, first_line): + """ + scan data file from starting point to find the extent of the data + + Parameters + ---------- + fd_data_file : file descriptor + File with data to scan. File location should be at the beginning + of the data. + + Returns + ------- + list, str : data from file, next line in file after data + """ + data_lines = [] + clean_first_line = first_line.strip().lower() + if clean_first_line.startswith("end"): + return data_lines, fd_data_file.readline() + if len(clean_first_line) > 0 and clean_first_line[0] != "#": + data_lines.append(clean_first_line) + line = fd_data_file.readline() + while line: + line_mod = line.strip().lower() + if line_mod.startswith("end"): + return data_lines, line + if len(line_mod) > 0 and line_mod[0] != "#": + data_lines.append(line_mod) + line = fd_data_file.readline() + return data_lines, "" + + def _dataframe_check(self, data_frame): + valid = data_frame.shape[0] > 0 + if valid: + for name in self._header_names: + if ( + name != "boundname" + and data_frame[name].isnull().values.any() + ): + valid = False + break + return valid + + def _try_pandas_read(self, fd_data_file): + delimiter_list = ["\\s+", ","] + for delimiter in delimiter_list: + try: + # read flopy formatted data, entire file + data_frame = pandas.read_csv( + fd_data_file, + sep=delimiter, + names=self._header_names, + dtype=self._data_header, + comment="#", + index_col=False, + skipinitialspace=True, + ) + except BaseException: + fd_data_file.seek(0) + continue + + # basic check for valid dataset + if self._dataframe_check(data_frame): + return data_frame + else: + fd_data_file.seek(0) + return None + + def _read_text_data(self, fd_data_file, first_line, external_file=False): + """ + read list data from data file + + Parameters + ---------- + fd_data_file : file descriptor + File with data. File location should be at the beginning of the + data. 
+ + external_file : bool + whether this is an external file + + Returns + ------- + DataFrame : file's list data + list : containing boolean for success of operation and the next line of + data in the file + """ + # initialize + data_frame = None + return_val = [False, None] + + # build header + self._build_data_header() + file_data, next_line = self._file_data_to_memory( + fd_data_file, first_line + ) + io_file_data = io.StringIO("\n".join(file_data)) + if external_file: + data_frame = self._try_pandas_read(io_file_data) + if data_frame is not None: + self._decrement_id_fields(data_frame) + else: + # get number of rows of data + if len(file_data) > 0: + data_frame = self._try_pandas_read(io_file_data) + if data_frame is not None: + self._decrement_id_fields(data_frame) + return_val = [True, fd_data_file.readline()] + + if data_frame is None: + # read user formatted data using MFList class + list_data = MFList( + self._simulation_data, + self._model_or_sim, + self.structure, + None, + True, + self.path, + self._data_dimensions.package_dim, + self._package, + self._block, + ) + # start in original location + io_file_data.seek(0) + return_val = list_data.load( + None, io_file_data, self._block.block_headers[-1] + ) + rec_array = list_data.get_data() + if rec_array is not None: + data_frame = pandas.DataFrame(rec_array) + data_frame = self._untuple_cellids(data_frame)[0] + return_val = [True, fd_data_file.readline()] + else: + data_frame = None + return data_frame, return_val + + def _save_binary_data(self, fd_data_file, data): + # write + file_access = MFFileAccessList( + self.structure, + self._data_dimensions, + self._simulation_data, + self._path, + self._current_key, + ) + file_access.write_binary_file( + self._dataframe_to_recarray(data), + fd_data_file, + self._model_or_sim.modeldiscrit, + ) + data_storage = self._get_storage() + data_storage.internal_data = None + + def has_data(self, key=None): + """Returns whether this MFList has any data associated with it.""" + try: + if self._get_storage() is None: + return False + return self._get_storage().has_data() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + "checking for data", + self.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + None, + self._simulation_data.debug, + ex, + ) + + def _load_external_data(self, data_storage): + """loads external data into a panda's dataframe""" + file_path = self._resolve_ext_file_path(data_storage) + # parse next line in file as data header + if data_storage.binary: + file_access = MFFileAccessList( + self.structure, + self._data_dimensions, + self._simulation_data, + self._path, + self._current_key, + ) + np_data = file_access.read_binary_data_from_file( + file_path, + self._model_or_sim.modeldiscrit, + build_cellid=False, + ) + pd_data = pandas.DataFrame(np_data) + if "col" in pd_data: + # keep layer/row/column names consistent + pd_data = pd_data.rename(columns={"col": "column"}) + self._decrement_id_fields(pd_data) + else: + with open(file_path, "r") as fd_data_file: + pd_data, return_val = self._read_text_data( + fd_data_file, "", True + ) + return pd_data + + def load( + self, + first_line, + file_handle, + block_header, + pre_data_comments=None, + external_file_info=None, + ): + """Loads data from first_line (the first line of data) and open file + file_handle which is pointing to the second line of data. 
Returns a + tuple with the first item indicating whether all data was read + and the second item being the last line of text read from the file. + This method was only designed for internal FloPy use and is not + recommended for end users. + + Parameters + ---------- + first_line : str + A string containing the first line of data in this list. + file_handle : file descriptor + A file handle for the data file which points to the second + line of data for this list + block_header : MFBlockHeader + Block header object that contains block header information + for the block containing this data + pre_data_comments : MFComment + Comments immediately prior to the data + external_file_info : list + Contains information about storing files externally + Returns + ------- + more data : bool, + next data line : str + + """ + data_storage = self._get_storage() + data_storage.modified = False + # parse first line to determine if this is internal or external data + datautil.PyListUtil.reset_delimiter_used() + arr_line = datautil.PyListUtil.split_data_line(first_line) + if arr_line and ( + len(arr_line[0]) >= 2 and arr_line[0][:3].upper() == "END" + ): + return [False, arr_line] + if len(arr_line) >= 2 and arr_line[0].upper() == "OPEN/CLOSE": + try: + ( + data, + multiplier, + iprn, + binary, + data_file, + ) = self._process_open_close_line(arr_line) + except Exception as ex: + message = ( + "An error occurred while processing the following " + "open/close line: {}".format(arr_line) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + "processing open/close line", + self.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + self._simulation_data.debug, + ex, + ) + data_storage.set_external(data_file, data) + data_storage.binary = binary + data_storage.iprn = iprn + return_val = [False, None] + # else internal + else: + # read data into pandas dataframe + pd_data, return_val = self._read_text_data( + file_handle, first_line, False + ) + # verify this is the end of the block? 
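+            # Illustrative example (assumed values, for orientation only):
+            # a period block such as
+            #     1  1  5  -2000.0
+            #     1  1  8  -20.0
+            # arrives here from _read_text_data() as a DataFrame whose cellid
+            # columns (layer/row/column on a structured grid) have already
+            # been shifted from MODFLOW's 1-based numbering to 0-based by
+            # _decrement_id_fields(); OPEN/CLOSE input never reaches this
+            # branch because it was stored via data_storage.set_external()
+            # above.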
+
+            # store internal data
+            data_storage.set_internal(pd_data)
+        return return_val
+
+    def _new_storage(self):
+        return {"Data": PandasListStorage()}
+
+    def _get_storage(self):
+        return self._data_storage["Data"]
+
+    def _get_id_fields(self, data_frame):
+        """
+        assemble a list of id fields in this dataset
+
+        Parameters
+        ----------
+        data_frame : DataFrame
+            data for this list
+
+        Returns
+        -------
+        list of column names that are id fields
+        """
+        id_fields = []
+        # loop through the data structure
+        for idx, data_item_struct in enumerate(
+            self.structure.data_item_structures
+        ):
+            if data_item_struct.type == DatumType.keystring:
+                # handle id fields for keystring
+                # ***Code not necessary for this version
+                ks_key = data_frame.iloc[0, idx].lower()
+                if ks_key in data_item_struct.keystring_dict:
+                    data_item_ks = data_item_struct.keystring_dict[ks_key]
+                else:
+                    ks_key = f"{ks_key}record"
+                    if ks_key in data_item_struct.keystring_dict:
+                        data_item_ks = data_item_struct.keystring_dict[ks_key]
+                    else:
+                        continue
+                if isinstance(data_item_ks, MFDataStructure):
+                    dis = data_item_ks.data_item_structures
+                    for data_item in dis:
+                        self._update_id_fields(
+                            id_fields, data_item, data_frame
+                        )
+                else:
+                    self._update_id_fields(id_fields, data_item_ks, data_frame)
+            else:
+                self._update_id_fields(id_fields, data_item_struct, data_frame)
+        return id_fields
+
+    def _update_id_fields(self, id_fields, data_item_struct, data_frame):
+        """
+        update the "id_fields" list with new field(s) based on an item in
+        the expected data structure and the data provided.
+        """
+        if data_item_struct.numeric_index or data_item_struct.is_cellid:
+            if data_item_struct.name.lower() == "cellid":
+                if isinstance(self._mg, StructuredGrid):
+                    id_fields.append("layer")
+                    id_fields.append("row")
+                    id_fields.append("column")
+                elif isinstance(self._mg, VertexGrid):
+                    id_fields.append("layer")
+                    id_fields.append("cell")
+                elif isinstance(self._mg, UnstructuredGrid):
+                    id_fields.append("node")
+                else:
+                    raise MFDataException(
+                        "ERROR: Unrecognized model grid "
+                        f"{str(self._mg)} not supported by MFBasicList"
+                    )
+            else:
+                for col in data_frame.columns:
+                    if col.startswith(data_item_struct.name):
+                        data_item_len = len(data_item_struct.name)
+                        if len(col) > data_item_len:
+                            col_end = col[data_item_len:]
+                            if (
+                                len(col_end) > 1
+                                and col_end[0] == "_"
+                                and datautil.DatumUtil.is_int(col_end[1:])
+                            ):
+                                id_fields.append(col)
+                        else:
+                            id_fields.append(data_item_struct.name)
+
+    def _increment_id_fields(self, data_frame):
+        """increment all id fields by 1 (reverse for negative values)"""
+        dtypes = data_frame.dtypes
+        for id_field in self._get_id_fields(data_frame):
+            if id_field in data_frame:
+                if id_field in dtypes and dtypes[id_field].str != " 0 and rel_path != ".":
+            # include model relative path in external file path
+            # only if model relative path is not already in external
+            # file path i.e. when reading!
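+            # Worked example (assumed names): with rel_path "model1", an
+            # external file recorded as "wel_1.txt" resolves to
+            # "model1/wel_1.txt" below, while a name already stored as
+            # "model1/wel_1.txt" is left unchanged by the component check.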
+ fp_rp_l = fp_relative.split(os.path.sep) + rp_l_r = rel_path.split(os.path.sep)[::-1] + for i, rp in enumerate(rp_l_r): + if rp != fp_rp_l[len(rp_l_r) - i - 1]: + fp_relative = os.path.join(rp, fp_relative) + fp = self._simulation_data.mfpath.resolve_path( + fp_relative, model_name + ) + else: + if fp_relative is not None: + fp = os.path.join( + self._simulation_data.mfpath.get_sim_path(), fp_relative + ) + else: + fp = self._simulation_data.mfpath.get_sim_path() + return fp + + def _dataframe_to_recarray(self, data_frame): + # convert cellids to tuple + df_rec = self._add_cellid_fields(data_frame, False) + + # convert to recarray + return df_rec.to_records(index=False) + + def _get_data(self): + dataframe = self._get_dataframe() + if dataframe is None: + return None + return self._dataframe_to_recarray(dataframe) + + def _get_dataframe(self): + """get and return dataframe for this list data""" + data_storage = self._get_storage() + if data_storage is None or data_storage.data_storage_type is None: + block_exists = self._block.header_exists( + self._current_key, self.path + ) + if block_exists: + self._build_data_header() + return pandas.DataFrame(columns=self._header_names) + else: + return None + if data_storage.data_storage_type == DataStorageType.internal_array: + data = copy.deepcopy(data_storage.internal_data) + else: + if data_storage.internal_data is not None: + # latest data is in internal cache + data = copy.deepcopy(data_storage.internal_data) + else: + # load data from file and return + data = self._load_external_data(data_storage) + return data + + def get_dataframe(self): + """Returns the list's data as a dataframe. + + Returns + ------- + data : DataFrame + + """ + return self._get_dataframe() + + def get_data(self, apply_mult=False, **kwargs): + """Returns the list's data as a recarray. + + Parameters + ---------- + apply_mult : bool + Whether to apply a multiplier. + + Returns + ------- + data : recarray + + """ + return self._get_data() + + def get_record(self, data_frame=False): + """Returns the list's data and metadata in a dictionary. Data is in + key "data" and metadata in keys "filename" and "binary". + + Returns + ------- + data_record : dict + + """ + return self._get_record(data_frame) + + def _get_record(self, data_frame=False): + """Returns the list's data and metadata in a dictionary. Data is in + key "data" and metadata in keys "filename" and "binary". + + Returns + ------- + data_record : dict + + """ + try: + if self._get_storage() is None: + return None + record = self._get_storage().get_record() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + "getting record", + self.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + None, + self._simulation_data.debug, + ex, + ) + if not data_frame: + if "data" not in record: + record["data"] = self._get_data() + elif record["data"] is not None: + data = copy.deepcopy(record["data"]) + record["data"] = self._dataframe_to_recarray(data) + else: + if "data" not in record: + record["data"] = self._get_dataframe() + return record + + def write_file_entry( + self, + fd_data_file, + ext_file_action=ExtFileAction.copy_relative_paths, + fd_main=None, + ): + """ + Writes file entry to file, or if fd_data_file is None returns file + entry as string. 
+ + Parameters + ---------- + fd_data_file : file descriptor + where data is written + ext_file_action : ExtFileAction + What action to perform on external files + fd_main + file descriptor where open/close string should be written (for + external file data) + + Returns + ------- + file entry : str + + """ + return self._write_file_entry(fd_data_file, ext_file_action, fd_main) + + def get_file_entry( + self, + ext_file_action=ExtFileAction.copy_relative_paths, + ): + """Returns a string containing the data formatted for a MODFLOW 6 + file. + + Parameters + ---------- + ext_file_action : ExtFileAction + How to handle external paths. + + Returns + ------- + file entry : str + + """ + return self._write_file_entry(None) + + def _write_file_entry( + self, + fd_data_file, + ext_file_action=ExtFileAction.copy_relative_paths, + fd_main=None, + ): + """ + Writes file entry to file, or if fd_data_file is None returns file + entry as string. + + Parameters + ---------- + fd_data_file : file descriptor + Where data is written + ext_file_action : ExtFileAction + What action to perform on external files + fd_main + file descriptor where open/close string should be written (for + external file data) + Returns + ------- + result of pandas to_csv call + """ + data_storage = self._get_storage() + if data_storage is None: + return "" + if ( + data_storage.data_storage_type == DataStorageType.external_file + and fd_main is not None + ): + indent = self._simulation_data.indent_string + ext_string, fname = self._get_external_formatting_str( + data_storage.fname, + None, + data_storage.binary, + data_storage.iprn, + DataStructureType.recarray, + ext_file_action, + ) + data_storage.fname = fname + fd_main.write(f"{indent}{indent}{ext_string}") + if data_storage is None or data_storage.internal_data is None: + return "" + # Loop through data pieces + data = self._remove_cellid_fields(data_storage.internal_data) + if ( + data_storage.data_storage_type == DataStorageType.internal_array + or not data_storage.binary + or fd_data_file is None + ): + # add spacer column + if "leading_space" not in data: + data.insert(loc=0, column="leading_space", value="") + if "leading_space_2" not in data: + data.insert(loc=0, column="leading_space_2", value="") + + result = "" + # if data is internal or has been modified + if ( + data_storage.data_storage_type == DataStorageType.internal_array + or data is not None + or fd_data_file is None + ): + if ( + data_storage.data_storage_type == DataStorageType.external_file + and data_storage.binary + and fd_data_file is not None + ): + # write old way using numpy + self._save_binary_data(fd_data_file, data) + else: + if data.shape[0] == 0: + if fd_data_file is None or not isinstance( + fd_data_file, io.TextIOBase + ): + result = "\n" + else: + # no data, just write empty line + fd_data_file.write("\n") + else: + # convert data to 1-based + self._increment_id_fields(data) + # write converted data + float_format = ( + f"%{self._simulation_data.reg_format_str[2:-1]}" + ) + result = data.to_csv( + fd_data_file, + sep=" ", + header=False, + index=False, + float_format=float_format, + lineterminator="\n", + ) + # clean up + data_storage.modified = False + self._decrement_id_fields(data) + if ( + data_storage.data_storage_type + == DataStorageType.external_file + ): + data_storage.internal_data = None + + if data_storage.internal_data is not None: + # clean up + if "leading_space" in data_storage.internal_data: + data_storage.internal_data = data_storage.internal_data.drop( + 
columns="leading_space" + ) + if "leading_space_2" in data_storage.internal_data: + data_storage.internal_data = data_storage.internal_data.drop( + columns="leading_space_2" + ) + return result + + def _get_file_path(self): + """ + gets the file path to the data + + Returns + ------- + file_path : file path to data + + """ + data_storage = self._get_storage() + if data_storage.fname is None: + return None + if self._model_or_sim.type == "model": + rel_path = self._simulation_data.mfpath.model_relative_path[ + self._model_or_sim.name + ] + fp_relative = data_storage.fname + if rel_path is not None and len(rel_path) > 0 and rel_path != ".": + # include model relative path in external file path + # only if model relative path is not already in external + # file path i.e. when reading! + fp_rp_l = fp_relative.split(os.path.sep) + rp_l_r = rel_path.split(os.path.sep)[::-1] + for i, rp in enumerate(rp_l_r): + if rp != fp_rp_l[len(rp_l_r) - i - 1]: + fp_relative = os.path.join(rp, fp_relative) + return self._simulation_data.mfpath.resolve_path( + fp_relative, self._model_or_sim.name + ) + else: + return os.path.join( + self._simulation_data.mfpath.get_sim_path(), data_storage.fname + ) + + def plot( + self, + key=None, + names=None, + filename_base=None, + file_extension=None, + mflay=None, + **kwargs, + ): + """ + Plot boundary condition (MfList) data + + Parameters + ---------- + key : str + MfList dictionary key. (default is None) + names : list + List of names for figure titles. (default is None) + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + + Returns + ---------- + out : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. 
+ """ + from ...plot import PlotUtilities + + if not self.plottable: + raise TypeError("Simulation level packages are not plottable") + + if "cellid" not in self.dtype.names: + return + + PlotUtilities._plot_mflist_helper( + mflist=self, + key=key, + kper=None, + names=names, + filename_base=None, + file_extension=None, + mflay=None, + **kwargs, + ) + + +class MFPandasTransientList( + MFPandasList, mfdata.MFTransient, DataListInterface +): + """ + Provides an interface for the user to access and update MODFLOW transient + pandas list data. + + Parameters + ---------- + sim_data : MFSimulationData + data contained in the simulation + structure : MFDataStructure + describes the structure of the data + enable : bool + enable/disable the array + path : tuple + path in the data dictionary to this MFArray + dimensions : MFDataDimensions + dimension information related to the model, package, and array + + """ + + def __init__( + self, + sim_data, + model_or_sim, + structure, + enable=True, + path=None, + dimensions=None, + package=None, + block=None, + ): + super().__init__( + sim_data=sim_data, + model_or_sim=model_or_sim, + structure=structure, + data=None, + enable=enable, + path=path, + dimensions=dimensions, + package=package, + block=block, + ) + self.repeating = True + self.empty_keys = {} + + @property + def data_type(self): + return DataType.transientlist + + @property + def dtype(self): + data = self.get_data() + if len(data) > 0: + if 0 in data: + return data[0].dtype + else: + return next(iter(data.values())).dtype + else: + return None + + @property + def plottable(self): + """If this list data is plottable""" + if self.model is None: + return False + else: + return True + + @property + def data(self): + """Returns list data. Calls get_data with default parameters.""" + return self.get_data() + + @property + def dataframe(self): + """Returns list data. Calls get_data with default parameters.""" + return self.get_dataframe() + + def to_array(self, kper=0, mask=False): + """Returns list data as an array.""" + return super().to_array(kper, mask) + + def remove_transient_key(self, transient_key): + """Remove transient stress period key. Method is used + internally by FloPy and is not intended to the end user. + + """ + if transient_key in self._data_storage: + del self._data_storage[transient_key] + + def add_transient_key(self, transient_key): + """Adds a new transient time allowing data for that time to be stored + and retrieved using the key `transient_key`. Method is used + internally by FloPy and is not intended to the end user. + + Parameters + ---------- + transient_key : int + Zero-based stress period to add + + """ + super().add_transient_key(transient_key) + self._data_storage[transient_key] = PandasListStorage() + + def store_as_external_file( + self, + external_file_path, + binary=False, + replace_existing_external=True, + check_data=True, + ): + """Store all data externally in file external_file_path. the binary + allows storage in a binary file. If replace_existing_external is set + to False, this method will not do anything if the data is already in + an external file. + + Parameters + ---------- + external_file_path : str + Path to external file + binary : bool + Store data in a binary file + replace_existing_external : bool + Whether to replace an existing external file. 
+ check_data : bool + Verify data prior to storing + + """ + self._cache_model_grid = True + for sp in self._data_storage.keys(): + self._current_key = sp + storage = self._get_storage() + if storage.internal_size == 0: + storage.internal_data = self.get_dataframe(sp) + if storage.internal_size > 0 and ( + self._get_storage().data_storage_type + != DataStorageType.external_file + or replace_existing_external + ): + fname, ext = os.path.splitext(external_file_path) + if datautil.DatumUtil.is_int(sp): + full_name = f"{fname}_{int(sp) + 1}{ext}" + else: + full_name = f"{fname}_{sp}{ext}" + + super().store_as_external_file( + full_name, + binary, + replace_existing_external, + check_data, + ) + self._cache_model_grid = False + + def store_internal( + self, + check_data=True, + ): + """Store all data internally. + + Parameters + ---------- + check_data : bool + Verify data prior to storing + + """ + self._cache_model_grid = True + for sp in self._data_storage.keys(): + self._current_key = sp + if ( + self._get_storage().data_storage_type + == DataStorageType.external_file + ): + super().store_internal( + check_data, + ) + self._cache_model_grid = False + + def has_data(self, key=None): + """Returns whether this MFList has any data associated with it in key + "key".""" + if key is None: + for sto_key in self._data_storage.keys(): + self.get_data_prep(sto_key) + if super().has_data(): + return True + return False + else: + self.get_data_prep(key) + return super().has_data() + + def has_modified_ext_data(self, key=None): + if key is None: + for sto_key in self._data_storage.keys(): + self.get_data_prep(sto_key) + if super().has_modified_ext_data(): + return True + return False + else: + self.get_data_prep(key) + return super().has_modified_ext_data() + + def binary_ext_data(self, key=None): + if key is None: + for sto_key in self._data_storage.keys(): + self.get_data_prep(sto_key) + if super().binary_ext_data(): + return True + return False + else: + self.get_data_prep(key) + return super().binary_ext_data() + + def get_record(self, key=None, data_frame=False): + """Returns the data for stress period `key`. If no key is specified + returns all records in a dictionary with zero-based stress period + numbers as keys. See MFList's get_record documentation for more + information on the format of each record returned. + + Parameters + ---------- + key : int + Zero-based stress period to return data from. + data_frame : bool + whether to return a Pandas DataFrame object instead of a + recarray + Returns + ------- + data_record : dict + + """ + if self._data_storage is not None and len(self._data_storage) > 0: + if key is None: + output = {} + for key in self._data_storage.keys(): + self.get_data_prep(key) + output[key] = super().get_record(data_frame=data_frame) + return output + self.get_data_prep(key) + return super().get_record() + else: + return None + + def get_dataframe(self, key=None, apply_mult=False): + return self.get_data(key, apply_mult, dataframe=True) + + def get_data(self, key=None, apply_mult=False, dataframe=False, **kwargs): + """Returns the data for stress period `key`. + + Parameters + ---------- + key : int + Zero-based stress period to return data from. 
+ apply_mult : bool + Apply multiplier + dataframe : bool + Get as pandas dataframe + + Returns + ------- + data : recarray + + """ + if self._data_storage is not None and len(self._data_storage) > 0: + if key is None: + if "array" in kwargs: + output = [] + sim_time = self._data_dimensions.package_dim.model_dim[ + 0 + ].simulation_time + num_sp = sim_time.get_num_stress_periods() + data = None + for sp in range(0, num_sp): + if sp in self._data_storage: + self.get_data_prep(sp) + data = super().get_data(apply_mult=apply_mult) + elif self._block.header_exists(sp): + data = None + output.append(data) + return output + else: + output = {} + for key in self._data_storage.keys(): + self.get_data_prep(key) + if dataframe: + output[key] = super().get_dataframe() + else: + output[key] = super().get_data( + apply_mult=apply_mult + ) + return output + self.get_data_prep(key) + if dataframe: + return super().get_dataframe() + else: + return super().get_data(apply_mult=apply_mult) + else: + return None + + def set_record(self, record, autofill=False, check_data=True): + """Sets the contents of the data based on the contents of + 'record`. + + Parameters + ---------- + record : dict + Record being set. Record must be a dictionary with + keys as zero-based stress periods and values as dictionaries + containing the data and metadata. See MFList's set_record + documentation for more information on the format of the values. + autofill : bool + Automatically correct data + check_data : bool + Whether to verify the data + """ + self._set_data_record( + record, + autofill=autofill, + check_data=check_data, + is_record=True, + ) + + def set_data(self, data, key=None, autofill=False): + """Sets the contents of the data at time `key` to `data`. + + Parameters + ---------- + data : dict, recarray, list + Data being set. Data can be a dictionary with keys as + zero-based stress periods and values as the data. If data is + a recarray or list of tuples, it will be assigned to the + stress period specified in `key`. If any is set to None, that + stress period of data will be removed. + key : int + Zero based stress period to assign data too. Does not apply + if `data` is a dictionary. + autofill : bool + Automatically correct data. 
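+
+        Examples
+        --------
+        Illustrative sketch; ``wel`` is an assumed ModflowGwfwel package:
+
+        >>> spd = {
+        ...     0: [((0, 0, 4), -1500.0), ((0, 0, 7), -2.0)],
+        ...     1: None,  # removes the stress period 1 data
+        ... }
+        >>> wel.stress_period_data.set_data(spd)  # doctest: +SKIP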
+ """ + self._set_data_record(data, key, autofill) + + def masked_4D_arrays_itr(self): + """Returns list data as an iterator of a masked 4D array.""" + model_grid = self._data_dimensions.get_model_grid() + nper = self._data_dimensions.package_dim.model_dim[ + 0 + ].simulation_time.get_num_stress_periods() + # get the first kper + arrays = self.to_array(kper=0, mask=True) + + if arrays is not None: + # initialize these big arrays + for name, array in arrays.items(): + if model_grid.grid_type() == DiscretizationType.DIS: + m4d = np.zeros( + ( + nper, + model_grid.num_layers(), + model_grid.num_rows(), + model_grid.num_columns(), + ) + ) + m4d[0, :, :, :] = array + for kper in range(1, nper): + arrays = self.to_array(kper=kper, mask=True) + for tname, array in arrays.items(): + if tname == name: + m4d[kper, :, :, :] = array + yield name, m4d + else: + m3d = np.zeros( + ( + nper, + model_grid.num_layers(), + model_grid.num_cells_per_layer(), + ) + ) + m3d[0, :, :] = array + for kper in range(1, nper): + arrays = self.to_array(kper=kper, mask=True) + for tname, array in arrays.items(): + if tname == name: + m3d[kper, :, :] = array + yield name, m3d + + def _set_data_record( + self, + data_record, + key=None, + autofill=False, + check_data=False, + is_record=False, + ): + self._cache_model_grid = True + if isinstance(data_record, dict): + if "filename" not in data_record and "data" not in data_record: + # each item in the dictionary is a list for one stress period + # the dictionary key is the stress period the list is for + del_keys = [] + for key, list_item in data_record.items(): + list_item_record = False + if list_item is None: + self.remove_transient_key(key) + del_keys.append(key) + self.empty_keys[key] = False + elif isinstance(list_item, list) and len(list_item) == 0: + self.empty_keys[key] = True + else: + self.empty_keys[key] = False + if isinstance(list_item, dict): + list_item_record = True + self._set_data_prep(list_item, key) + if list_item_record: + super().set_record(list_item, autofill, check_data) + else: + super().set_data( + list_item, + autofill=autofill, + check_data=check_data, + ) + for key in del_keys: + del data_record[key] + else: + self.empty_keys[key] = False + self._set_data_prep(data_record["data"], key) + super().set_data(data_record, autofill) + else: + if is_record: + comment = ( + "Set record method requires that data_record is a " + "dictionary." 
+ ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + "setting data record", + self.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + comment, + self._simulation_data.debug, + ) + if key is None: + # search for a key + new_key_index = self.structure.first_non_keyword_index() + if ( + new_key_index is not None + and len(data_record) > new_key_index + ): + key = data_record[new_key_index] + else: + key = 0 + if isinstance(data_record, list) and len(data_record) == 0: + self.empty_keys[key] = True + else: + check = True + if ( + isinstance(data_record, list) + and len(data_record) > 0 + and data_record[0] == "no_check" + ): + # not checking data + check = False + data_record = data_record[1:] + self.empty_keys[key] = False + if data_record is None: + self.remove_transient_key(key) + else: + self._set_data_prep(data_record, key) + super().set_data(data_record, autofill, check_data=check) + self._cache_model_grid = False + + def external_file_name(self, key=0): + """Returns external file name, or None if this is not external data. + + Parameters + ---------- + key : int + Zero based stress period to return data from. + """ + if key in self.empty_keys and self.empty_keys[key]: + return None + else: + self._get_file_entry_prep(key) + return super().external_file_name() + + def write_file_entry( + self, + fd_data_file, + key=0, + ext_file_action=ExtFileAction.copy_relative_paths, + fd_main=None, + ): + """Returns a string containing the data at time `key` formatted for a + MODFLOW 6 file. + + Parameters + ---------- + fd_data_file : file + File to write to + key : int + Zero based stress period to return data from. + ext_file_action : ExtFileAction + How to handle external paths. + + Returns + ------- + file entry : str + + """ + if key in self.empty_keys and self.empty_keys[key]: + return "" + else: + self._get_file_entry_prep(key) + return super().write_file_entry( + fd_data_file, + ext_file_action=ext_file_action, + fd_main=fd_main, + ) + + def get_file_entry( + self, key=0, ext_file_action=ExtFileAction.copy_relative_paths + ): + """Returns a string containing the data at time `key` formatted for a + MODFLOW 6 file. + + Parameters + ---------- + key : int + Zero based stress period to return data from. + ext_file_action : ExtFileAction + How to handle external paths. + + Returns + ------- + file entry : str + + """ + if key in self.empty_keys and self.empty_keys[key]: + return "" + else: + self._get_file_entry_prep(key) + return super()._write_file_entry( + None, ext_file_action=ext_file_action + ) + + def load( + self, + first_line, + file_handle, + block_header, + pre_data_comments=None, + external_file_info=None, + ): + """Loads data from first_line (the first line of data) and open file + file_handle which is pointing to the second line of data. Returns a + tuple with the first item indicating whether all data was read + and the second item being the last line of text read from the file. + + Parameters + ---------- + first_line : str + A string containing the first line of data in this list. 
+ file_handle : file descriptor + A file handle for the data file which points to the second + line of data for this array + block_header : MFBlockHeader + Block header object that contains block header information + for the block containing this data + pre_data_comments : MFComment + Comments immediately prior to the data + external_file_info : list + Contains information about storing files externally + + """ + self._load_prep(block_header) + return super().load( + first_line, + file_handle, + block_header, + pre_data_comments, + external_file_info, + ) + + def append_list_as_record(self, record, key=0): + """Appends the list `data` as a single record in this list's recarray + at time `key`. Assumes `data` has the correct dimensions. + + Parameters + ---------- + record : list + Data to append + key : int + Zero based stress period to append data too. + + """ + self._append_list_as_record_prep(record, key) + super().append_list_as_record(record) + + def update_record(self, record, key_index, key=0): + """Updates a record at index `key_index` and time `key` with the + contents of `record`. If the index does not exist update_record + appends the contents of `record` to this list's recarray. + + Parameters + ---------- + record : list + Record to append + key_index : int + Index to update + key : int + Zero based stress period to append data too + + """ + + self._update_record_prep(key) + super().update_record(record, key_index) + + def _new_storage(self): + return {} + + def _get_storage(self): + if ( + self._current_key is None + or self._current_key not in self._data_storage + ): + return None + return self._data_storage[self._current_key] + + def plot( + self, + key=None, + names=None, + kper=0, + filename_base=None, + file_extension=None, + mflay=None, + **kwargs, + ): + """ + Plot stress period boundary condition (MfList) data for a specified + stress period + + Parameters + ---------- + key : str + MfList dictionary key. (default is None) + names : list + List of names for figure titles. (default is None) + kper : int + MODFLOW zero-based stress period number to return. (default is zero) + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. 
(default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + + Returns + ---------- + out : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. + """ + from ...plot import PlotUtilities + + if not self.plottable: + raise TypeError("Simulation level packages are not plottable") + + # model.plot() will not work for a mf6 model oc package unless + # this check is here + if self.get_data() is None: + return + + if "cellid" not in self.dtype.names: + return + + axes = PlotUtilities._plot_mflist_helper( + self, + key=key, + names=names, + kper=kper, + filename_base=filename_base, + file_extension=file_extension, + mflay=mflay, + **kwargs, + ) + return axes diff --git a/flopy/mf6/data/mfdatascalar.py b/flopy/mf6/data/mfdatascalar.py index 622754f560..88db11d061 100644 --- a/flopy/mf6/data/mfdatascalar.py +++ b/flopy/mf6/data/mfdatascalar.py @@ -737,7 +737,6 @@ def __init__( path=path, dimensions=dimensions, ) - self._transient_setup(self._data_storage) self.repeating = True @property diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index c74fd31d62..fa0836076e 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -2168,136 +2168,28 @@ def process_internal_line(self, arr_line): return multiplier, print_format def process_open_close_line(self, arr_line, layer, store=True): - # process open/close line - index = 2 - if self._data_type == DatumType.integer: - multiplier = 1 - else: - multiplier = 1.0 - print_format = None - binary = False - data_file = None - data = None - data_dim = self.data_dimensions - if isinstance(arr_line, list): - if len(arr_line) < 2 and store: - message = ( - 'Data array "{}" contains a OPEN/CLOSE ' - "that is not followed by a file. {}".format( - data_dim.structure.name, data_dim.structure.path - ) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - "processing open/close line", - data_dim.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self._simulation_data.debug, - ) - while index < len(arr_line): - if isinstance(arr_line[index], str): - word = arr_line[index].lower() - if word == "factor" and index + 1 < len(arr_line): - try: - multiplier = convert_data( - arr_line[index + 1], - self.data_dimensions, - self._data_type, - ) - except Exception as ex: - message = ( - "Data array {} contains an OPEN/CLOSE " - "with an invalid multiplier following " - 'the "factor" keyword.' 
- ".".format(data_dim.structure.name) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - "processing open/close line", - data_dim.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self._simulation_data.debug, - ex, - ) - index += 2 - elif word == "iprn" and index + 1 < len(arr_line): - print_format = arr_line[index + 1] - index += 2 - elif word == "data" and index + 1 < len(arr_line): - data = arr_line[index + 1] - index += 2 - elif word == "binary" or word == "(binary)": - binary = True - index += 1 - else: - break - else: - break - # save comments - if index < len(arr_line): - self.layer_storage[layer].comments = MFComment( - " ".join(arr_line[index:]), - self.data_dimensions.structure.path, - self._simulation_data, - layer, - ) - if arr_line[0].lower() == "open/close": - data_file = clean_filename(arr_line[1]) - else: - data_file = clean_filename(arr_line[0]) - elif isinstance(arr_line, dict): - for key, value in arr_line.items(): - if key.lower() == "factor": - try: - multiplier = convert_data( - value, self.data_dimensions, self._data_type - ) - except Exception as ex: - message = ( - "Data array {} contains an OPEN/CLOSE " - "with an invalid factor following the " - '"factor" keyword.' - ".".format(data_dim.structure.name) - ) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - "processing open/close line", - data_dim.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self._simulation_data.debug, - ex, - ) - if key.lower() == "iprn": - print_format = value - if key.lower() == "binary": - binary = bool(value) - if key.lower() == "data": - data = value - if "filename" in arr_line: - data_file = clean_filename(arr_line["filename"]) - + ( + multiplier, + print_format, + binary, + data_file, + data, + comment, + ) = mfdatautil.process_open_close_line( + arr_line, + data_dim, + self._data_type, + self._simulation_data.debug, + store, + ) + if comment is not None: + self.layer_storage[layer].comments = MFComment( + comment, + self.data_dimensions.structure.path, + self._simulation_data, + layer, + ) if data_file is None: message = ( "Data array {} contains an OPEN/CLOSE without a " diff --git a/flopy/mf6/data/mfdatautil.py b/flopy/mf6/data/mfdatautil.py index 00b4203ef8..bf9f571749 100644 --- a/flopy/mf6/data/mfdatautil.py +++ b/flopy/mf6/data/mfdatautil.py @@ -6,7 +6,7 @@ import numpy as np -from ...utils.datautil import DatumUtil, PyListUtil +from ...utils.datautil import DatumUtil, PyListUtil, clean_filename from ..mfbase import FlopyException, MFDataException from .mfstructure import DatumType @@ -137,6 +137,214 @@ def convert_data(data, data_dimensions, data_type, data_item=None, sub_amt=1): return data +def list_to_array(sarr, model_grid, kper=0, mask=False): + """Convert stress period boundary condition (MFDataList) data for a + specified stress period to a 3-D numpy array. + + Parameters + ---------- + sarr : recarray or list + list data to convert to array + model_grid : ModelGrid + model grid object for data + kper : int + MODFLOW zero-based stress period number to return. 
(default is + zero) + mask : bool + return array with np.NaN instead of zero + + Returns + ---------- + out : dict of numpy.ndarrays + Dictionary of 3-D numpy arrays containing the stress period data + for a selected stress period. The dictionary keys are the + MFDataList dtype names for the stress period data.""" + i0 = 1 + if not isinstance(sarr, list): + sarr = [sarr] + if len(sarr) == 0 or sarr[0] is None: + return None + if "inode" in sarr[0].dtype.names: + raise NotImplementedError() + arrays = {} + + if model_grid._grid_type.value == 1: + shape = ( + model_grid.num_layers(), + model_grid.num_rows(), + model_grid.num_columns(), + ) + elif model_grid._grid_type.value == 2: + shape = ( + model_grid.num_layers(), + model_grid.num_cells_per_layer(), + ) + else: + shape = (model_grid.num_cells_per_layer(),) + + for name in sarr[0].dtype.names[i0:]: + if not sarr[0].dtype.fields[name][0] == object: + arr = np.zeros(shape) + arrays[name] = arr.copy() + + if np.isscalar(sarr[0]): + # if there are no entries for this kper + if sarr[0] == 0: + if mask: + for name, arr in arrays.items(): + arrays[name][:] = np.NaN + return arrays + else: + raise Exception("MfList: something bad happened") + + for name, arr in arrays.items(): + cnt = np.zeros(shape, dtype=np.float64) + for sp_rec in sarr: + if sp_rec is not None: + for rec in sp_rec: + arr[rec["cellid"]] += rec[name] + cnt[rec["cellid"]] += 1.0 + # average keys that should not be added + if name != "cond" and name != "flux": + idx = cnt > 0.0 + arr[idx] /= cnt[idx] + if mask: + arr = np.ma.masked_where(cnt == 0.0, arr) + arr[cnt == 0.0] = np.NaN + + arrays[name] = arr.copy() + return arrays + + +def process_open_close_line( + arr_line, data_dim, data_type, sim_data, store=True +): + # process open/close line + index = 2 + if data_type == DatumType.integer: + multiplier = 1 + else: + multiplier = 1.0 + print_format = None + binary = False + data_file = None + data = None + comment = None + + if isinstance(arr_line, list): + if len(arr_line) < 2 and store: + message = ( + 'Data array "{}" contains a OPEN/CLOSE ' + "that is not followed by a file. {}".format( + data_dim.structure.name, data_dim.structure.path + ) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + data_dim.structure.get_model(), + data_dim.structure.get_package(), + data_dim.structure.path, + "processing open/close line", + data_dim.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + debug, + ) + while index < len(arr_line): + if isinstance(arr_line[index], str): + word = arr_line[index].lower() + if word == "factor" and index + 1 < len(arr_line): + try: + multiplier = convert_data( + arr_line[index + 1], + data_dim, + data_type, + ) + except Exception as ex: + message = ( + "Data array {} contains an OPEN/CLOSE " + "with an invalid multiplier following " + 'the "factor" keyword.' 
+ ".".format(data_dim.structure.name) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + data_dim.structure.get_model(), + data_dim.structure.get_package(), + data_dim.structure.path, + "processing open/close line", + data_dim.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + sim_data.debug, + ex, + ) + index += 2 + elif word == "iprn" and index + 1 < len(arr_line): + print_format = arr_line[index + 1] + index += 2 + elif word == "data" and index + 1 < len(arr_line): + data = arr_line[index + 1] + index += 2 + elif word == "binary" or word == "(binary)": + binary = True + index += 1 + else: + break + else: + break + # save comments + if index < len(arr_line): + comment = " ".join(arr_line[index:]) + if arr_line[0].lower() == "open/close": + data_file = clean_filename(arr_line[1]) + else: + data_file = clean_filename(arr_line[0]) + elif isinstance(arr_line, dict): + for key, value in arr_line.items(): + if key.lower() == "factor": + try: + multiplier = convert_data(value, data_dim, data_type) + except Exception as ex: + message = ( + "Data array {} contains an OPEN/CLOSE " + "with an invalid factor following the " + '"factor" keyword.' + ".".format(data_dim.structure.name) + ) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + data_dim.structure.get_model(), + data_dim.structure.get_package(), + data_dim.structure.path, + "processing open/close line", + data_dim.structure.name, + inspect.stack()[0][3], + type_, + value_, + traceback_, + message, + sim_data.debug, + ex, + ) + if key.lower() == "iprn": + print_format = value + if key.lower() == "binary": + binary = bool(value) + if key.lower() == "data": + data = value + if "filename" in arr_line: + data_file = clean_filename(arr_line["filename"]) + + return multiplier, print_format, binary, data_file, data, comment + + def to_string( val, data_type, diff --git a/flopy/mf6/data/mffileaccess.py b/flopy/mf6/data/mffileaccess.py index bfadbde27d..0e109e75bb 100644 --- a/flopy/mf6/data/mffileaccess.py +++ b/flopy/mf6/data/mffileaccess.py @@ -1035,13 +1035,15 @@ def __init__( self.simple_line = False def read_binary_data_from_file( - self, read_file, modelgrid, precision="double" + self, read_file, modelgrid, precision="double", build_cellid=True ): # read from file header, int_cellid_indexes, ext_cellid_indexes = self._get_header( modelgrid, precision ) file_array = np.fromfile(read_file, dtype=header, count=-1) + if not build_cellid: + return file_array # build data list for recarray cellid_size = len(self._get_cell_header(modelgrid)) data_list = [] @@ -1134,6 +1136,9 @@ def load_from_package( self._last_line_info = [] self._data_line = None + if first_line is None: + first_line = file_handle.readline() + # read in any pre data comments current_line = self._read_pre_data_comments( first_line, file_handle, pre_data_comments, storage diff --git a/flopy/mf6/data/mfstructure.py b/flopy/mf6/data/mfstructure.py index 5ad263c480..e194f14b66 100644 --- a/flopy/mf6/data/mfstructure.py +++ b/flopy/mf6/data/mfstructure.py @@ -209,8 +209,11 @@ def get_block_structure_dict(self, path, common, model_file, block_parent): # get header dict header_dict = {} for item in self.dfn_list[0]: - if item == "multi-package": - header_dict["multi-package"] = True + if isinstance(item, str): + if item == "multi-package": + header_dict["multi-package"] = True + if item.startswith("package-type"): + header_dict["package-type"] = item.split(" ")[1] for dfn_entry in self.dfn_list[1:]: # load 
next data item new_data_item_struct = MFDataItemStructure() @@ -508,6 +511,8 @@ def get_block_structure_dict(self, path, common, model_file, block_parent): line_lst[3], line_lst[4], ] + elif len(line_lst) > 2 and line_lst[1] == "package-type": + header_dict["package-type"] = line_lst[2] # load file definitions for line in dfn_fp: if self._valid_line(line): @@ -1454,6 +1459,29 @@ def __init__(self, data_item, model_data, package_type, dfn_list): self.expected_data_items ) + @property + def basic_item(self): + if not self.parent_block.parent_package.stress_package: + return False + for item in self.data_item_structures: + if ( + ( + (item.repeating or item.optional) + and not ( + item.is_cellid or item.is_aux or item.is_boundname + ) + ) + or item.jagged_array is not None + or item.type == DatumType.keystring + or item.type == DatumType.keyword + or ( + item.description is not None + and "keyword `NONE'" in item.description + ) + ): + return False + return True + @property def is_mname(self): for item in self.data_item_structures: @@ -2109,6 +2137,14 @@ def __init__(self, dfn_file, path, common, model_file): self.has_packagedata = "packagedata" in self.blocks self.has_perioddata = "period" in self.blocks self.multi_package_support = "multi-package" in self.header + self.stress_package = ( + "package-type" in self.header + and self.header["package-type"] == "stress-package" + ) + self.advanced_stress_package = ( + "package-type" in self.header + and self.header["package-type"] == "advanced-stress-package" + ) self.dfn_list = dfn_file.dfn_list self.sub_package = self._sub_package() diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index ed5bcb8e84..f25900614c 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -4,7 +4,6 @@ import inspect import os import sys -from re import S import numpy as np @@ -14,7 +13,14 @@ from ..utils.check import mf6check from ..version import __version__ from .coordinates import modeldimensions -from .data import mfdata, mfdataarray, mfdatalist, mfdatascalar, mfstructure +from .data import ( + mfdata, + mfdataarray, + mfdatalist, + mfdataplist, + mfdatascalar, + mfstructure, +) from .data.mfdatautil import DataSearchOutput, MFComment, cellids_equal from .data.mfstructure import DatumType, MFDataItemStructure, MFStructure from .mfbase import ( @@ -466,28 +472,57 @@ def data_factory( trans_array.set_data(data, key=0) return trans_array elif data_type == mfstructure.DataType.list: - return mfdatalist.MFList( - sim_data, - model_or_sim, - structure, - data, - enable, - path, - dimensions, - package, - self, - ) + if ( + structure.basic_item + and self._container_package.package_type.lower() != "nam" + and self._simulation_data.use_pandas + ): + return mfdataplist.MFPandasList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) + else: + return mfdatalist.MFList( + sim_data, + model_or_sim, + structure, + data, + enable, + path, + dimensions, + package, + self, + ) elif data_type == mfstructure.DataType.list_transient: - trans_list = mfdatalist.MFTransientList( - sim_data, - model_or_sim, - structure, - enable, - path, - dimensions, - package, - self, - ) + if structure.basic_item and self._simulation_data.use_pandas: + trans_list = mfdataplist.MFPandasTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, + self, + ) + else: + trans_list = mfdatalist.MFTransientList( + sim_data, + model_or_sim, + structure, + enable, + path, + dimensions, + package, 
+ self, + ) if data is not None: trans_list.set_data(data, key=0, autofill=True) return trans_list @@ -1300,7 +1335,10 @@ def set_all_data_external( if ( isinstance(dataset, mfdataarray.MFArray) or ( - isinstance(dataset, mfdatalist.MFList) + ( + isinstance(dataset, mfdatalist.MFList) + or isinstance(dataset, mfdataplist.MFPandasList) + ) and dataset.structure.type == DatumType.recarray ) and dataset.enabled @@ -1347,7 +1385,10 @@ def set_all_data_internal(self, check_data=True): if ( isinstance(dataset, mfdataarray.MFArray) or ( - isinstance(dataset, mfdatalist.MFList) + ( + isinstance(dataset, mfdatalist.MFList) + or isinstance(dataset, mfdataplist.MFPandasList) + ) and dataset.structure.type == DatumType.recarray ) and dataset.enabled @@ -1361,8 +1402,37 @@ def _find_repeating_datasets(self): repeating_datasets.append(dataset) return repeating_datasets + def _prepare_external(self, fd, file_name, binary=False): + fd_main = fd + fd_path = self._simulation_data.mfpath.get_model_path(self.path[0]) + # resolve full file and folder path + fd_file_path = os.path.join(fd_path, file_name) + fd_folder_path = os.path.split(fd_file_path)[0] + if fd_folder_path != "": + if not os.path.exists(fd_folder_path): + # create new external data folder + os.makedirs(fd_folder_path) + return fd_main, fd_file_path + def _write_block(self, fd, block_header, ext_file_action): transient_key = None + basic_list = False + dataset_one = list(self.datasets.values())[0] + if isinstance( + dataset_one, + (mfdataplist.MFPandasList, mfdataplist.MFPandasTransientList), + ): + basic_list = True + for dataset in self.datasets.values(): + assert isinstance( + dataset, + ( + mfdataplist.MFPandasList, + mfdataplist.MFPandasTransientList, + ), + ) + # write block header + block_header.write_header(fd) if len(block_header.data_items) > 0: transient_key = block_header.get_transient_key() @@ -1379,9 +1449,25 @@ def _write_block(self, fd, block_header, ext_file_action): print( f" writing data {dataset.structure.name}..." ) - data_set_output.append( - dataset.get_file_entry(ext_file_action=ext_file_action) - ) + if basic_list: + ext_fname = dataset.external_file_name() + if ext_fname is not None: + # if dataset.has_modified_ext_data(): + binary = dataset.binary_ext_data() + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, ext_fname, binary + ) + dataset.write_file_entry(fd, fd_main=fd_main) + fd = fd_main + else: + dataset.write_file_entry(fd) + else: + data_set_output.append( + dataset.get_file_entry( + ext_file_action=ext_file_action + ) + ) data_found = True else: if ( @@ -1392,20 +1478,43 @@ def _write_block(self, fd, block_header, ext_file_action): " writing data {} ({}).." 
".".format(dataset.structure.name, transient_key) ) - if dataset.repeating: - output = dataset.get_file_entry( - transient_key, ext_file_action=ext_file_action - ) - if output is not None: - data_set_output.append(output) - data_found = True + if basic_list: + ext_fname = dataset.external_file_name(transient_key) + if ext_fname is not None: + # if dataset.has_modified_ext_data(transient_key): + binary = dataset.binary_ext_data(transient_key) + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, ext_fname, binary + ) + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + fd_main=fd_main, + ) + fd = fd_main + else: + dataset.write_file_entry( + fd, + transient_key, + ext_file_action=ext_file_action, + ) else: - data_set_output.append( - dataset.get_file_entry( - ext_file_action=ext_file_action + if dataset.repeating: + output = dataset.get_file_entry( + transient_key, ext_file_action=ext_file_action ) - ) - data_found = True + if output is not None: + data_set_output.append(output) + data_found = True + else: + data_set_output.append( + dataset.get_file_entry( + ext_file_action=ext_file_action + ) + ) + data_found = True except MFDataException as mfde: raise MFDataException( mfdata_except=mfde, @@ -1419,46 +1528,30 @@ def _write_block(self, fd, block_header, ext_file_action): ) if not data_found: return - # write block header - block_header.write_header(fd) - - if self.external_file_name is not None: - # write block contents to external file - indent_string = self._simulation_data.indent_string - fd.write(f"{indent_string}open/close {self.external_file_name}\n") - fd_main = fd - fd_path = os.path.split(os.path.realpath(fd.name))[0] - try: - fd = open(os.path.join(fd_path, self.external_file_name), "w") - except: - type_, value_, traceback_ = sys.exc_info() - message = ( - f'Error reading external file "{self.external_file_name}"' + if not basic_list: + # write block header + block_header.write_header(fd) + + if self.external_file_name is not None: + indent_string = self._simulation_data.indent_string + fd.write( + f"{indent_string}open/close " + f'"{self.external_file_name}"\n' ) - raise MFDataException( - self._container_package.model_name, - self._container_package._get_pname(), - self.path, - "reading external file", - self.structure.name, - inspect.stack()[0][3], - type_, - value_, - traceback_, - message, - self._simulation_data.debug, + # write block contents to external file + fd_main, fd = self._prepare_external( + fd, self.external_file_name ) - - # write data sets - for output in data_set_output: - fd.write(output) + # write data sets + for output in data_set_output: + fd.write(output) # write trailing comments pth = block_header.blk_trailing_comment_path if pth in self._simulation_data.mfdata: self._simulation_data.mfdata[pth].write(fd) - if self.external_file_name is not None: + if self.external_file_name is not None and not basic_list: # switch back writing to package file fd.close() fd = fd_main @@ -1975,10 +2068,12 @@ def check(self, f=None, verbose=True, level=1, checktype=None): for row in data: row_size = len(row) aux_start_loc = ( - row_size - num_aux_names - offset + row_size - num_aux_names - offset - 1 ) # loop through auxiliary variables - for idx, var in enumerate(aux_names): + for idx, var in enumerate( + list(aux_names[0])[1:] + ): # get index of current aux variable data_index = aux_start_loc + idx # verify auxiliary value is either diff --git a/flopy/mf6/mfsimbase.py b/flopy/mf6/mfsimbase.py index 
264f35fdbd..f5f806d700 100644 --- a/flopy/mf6/mfsimbase.py +++ b/flopy/mf6/mfsimbase.py @@ -257,6 +257,7 @@ def __init__(self, path: Union[str, os.PathLike], mfsim): self.verbosity_level = VerbosityLevel.normal self.max_columns_user_set = False self.max_columns_auto_set = False + self.use_pandas = True self._update_str_format() @@ -430,6 +431,8 @@ class MFSimulationBase(PackageContainer): and only writes external data if the data has changed. This option automatically overrides the verify_data and auto_set_sizes, turning both off. + use_pandas: bool + Load/save data using pandas dataframes (for supported data) Examples -------- >>> s = MFSimulationBase.load('my simulation', 'simulation.nam') @@ -455,12 +458,14 @@ def __init__( memory_print_option=None, write_headers=True, lazy_io=False, + use_pandas=True, ): super().__init__(MFSimulationData(sim_ws, self), sim_name) self.simulation_data.verbosity_level = self._resolve_verbosity_level( verbosity_level ) self.simulation_data.write_headers = write_headers + self.simulation_data.use_pandas = use_pandas if lazy_io: self.simulation_data.lazy_io = True @@ -686,6 +691,7 @@ def load( verify_data=False, write_headers=True, lazy_io=False, + use_pandas=True, ): """ Load an existing model. Do not call this method directly. Should only @@ -729,6 +735,9 @@ def load( and only writes external data if the data has changed. This option automatically overrides the verify_data and auto_set_sizes, turning both off. + use_pandas: bool + Load/save data using pandas dataframes (for supported data) + Returns ------- sim : MFSimulation object @@ -746,6 +755,7 @@ def load( sim_ws, verbosity_level, write_headers=write_headers, + use_pandas=use_pandas, ) verbosity_level = instance.simulation_data.verbosity_level diff --git a/flopy/mf6/modflow/mfgwfchd.py b/flopy/mf6/modflow/mfgwfchd.py index b1d018e026..c2a458e7de 100644 --- a/flopy/mf6/modflow/mfgwfchd.py +++ b/flopy/mf6/modflow/mfgwfchd.py @@ -1,6 +1,6 @@ # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY # mf6/utils/createpackages.py -# FILE created on June 29, 2023 14:20:38 UTC +# FILE created on August 29, 2023 20:06:54 UTC from .. import mfpackage from ..data.mfdatautil import ListTemplateGenerator @@ -117,10 +117,7 @@ class ModflowGwfchd(mfpackage.MFPackage): dfn_file_name = "gwf-chd.dfn" dfn = [ - [ - "header", - "multi-package", - ], + ["header", "multi-package", "package-type stress-package"], [ "block options", "name auxiliary", diff --git a/flopy/mf6/modflow/mfgwfdrn.py b/flopy/mf6/modflow/mfgwfdrn.py index 5e3e68612a..31380cf1c2 100644 --- a/flopy/mf6/modflow/mfgwfdrn.py +++ b/flopy/mf6/modflow/mfgwfdrn.py @@ -1,6 +1,6 @@ # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY # mf6/utils/createpackages.py -# FILE created on June 29, 2023 14:20:38 UTC +# FILE created on August 29, 2023 20:06:54 UTC from .. import mfpackage from ..data.mfdatautil import ListTemplateGenerator @@ -140,10 +140,7 @@ class ModflowGwfdrn(mfpackage.MFPackage): dfn_file_name = "gwf-drn.dfn" dfn = [ - [ - "header", - "multi-package", - ], + ["header", "multi-package", "package-type stress-package"], [ "block options", "name auxiliary", diff --git a/flopy/mf6/modflow/mfgwfevt.py b/flopy/mf6/modflow/mfgwfevt.py index fcc407e9c3..2fc5adaa4f 100644 --- a/flopy/mf6/modflow/mfgwfevt.py +++ b/flopy/mf6/modflow/mfgwfevt.py @@ -1,6 +1,6 @@ # DO NOT MODIFY THIS FILE DIRECTLY. 
diff --git a/flopy/mf6/modflow/mfgwfevt.py b/flopy/mf6/modflow/mfgwfevt.py
index fcc407e9c3..2fc5adaa4f 100644
--- a/flopy/mf6/modflow/mfgwfevt.py
+++ b/flopy/mf6/modflow/mfgwfevt.py
@@ -1,6 +1,6 @@
 # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
 # mf6/utils/createpackages.py
-# FILE created on June 29, 2023 14:20:38 UTC
+# FILE created on August 29, 2023 20:06:54 UTC

 from .. import mfpackage
 from ..data.mfdatautil import ListTemplateGenerator
@@ -160,10 +160,7 @@ class ModflowGwfevt(mfpackage.MFPackage):
     dfn_file_name = "gwf-evt.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type stress-package"],
         [
             "block options",
             "name fixed_cell",
diff --git a/flopy/mf6/modflow/mfgwfevta.py b/flopy/mf6/modflow/mfgwfevta.py
index eb74556e7a..691ae9e9f7 100644
--- a/flopy/mf6/modflow/mfgwfevta.py
+++ b/flopy/mf6/modflow/mfgwfevta.py
@@ -1,6 +1,6 @@
 # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
 # mf6/utils/createpackages.py
-# FILE created on June 29, 2023 14:20:38 UTC
+# FILE created on August 29, 2023 20:06:54 UTC

 from .. import mfpackage
 from ..data.mfdatautil import ArrayTemplateGenerator, ListTemplateGenerator
@@ -118,10 +118,7 @@ class ModflowGwfevta(mfpackage.MFPackage):
     dfn_file_name = "gwf-evta.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type stress-package"],
         [
             "block options",
             "name readasarrays",
diff --git a/flopy/mf6/modflow/mfgwfghb.py b/flopy/mf6/modflow/mfgwfghb.py
index 0bc28e4207..787d9ec12e 100644
--- a/flopy/mf6/modflow/mfgwfghb.py
+++ b/flopy/mf6/modflow/mfgwfghb.py
@@ -1,6 +1,6 @@
 # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
 # mf6/utils/createpackages.py
-# FILE created on June 29, 2023 14:20:38 UTC
+# FILE created on August 29, 2023 20:06:54 UTC

 from .. import mfpackage
 from ..data.mfdatautil import ListTemplateGenerator
@@ -128,10 +128,7 @@ class ModflowGwfghb(mfpackage.MFPackage):
     dfn_file_name = "gwf-ghb.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type stress-package"],
         [
             "block options",
             "name auxiliary",
diff --git a/flopy/mf6/modflow/mfgwflak.py b/flopy/mf6/modflow/mfgwflak.py
index dc01e74385..6d060ff8d2 100644
--- a/flopy/mf6/modflow/mfgwflak.py
+++ b/flopy/mf6/modflow/mfgwflak.py
@@ -469,10 +469,7 @@ class ModflowGwflak(mfpackage.MFPackage):
     dfn_file_name = "gwf-lak.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type advanced-stress-package"],
         [
             "block options",
             "name auxiliary",
diff --git a/flopy/mf6/modflow/mfgwfmaw.py b/flopy/mf6/modflow/mfgwfmaw.py
index 51ca09dfd9..1eeb143388 100644
--- a/flopy/mf6/modflow/mfgwfmaw.py
+++ b/flopy/mf6/modflow/mfgwfmaw.py
@@ -395,10 +395,7 @@ class ModflowGwfmaw(mfpackage.MFPackage):
     dfn_file_name = "gwf-maw.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type advanced-stress-package"],
         [
             "block options",
             "name auxiliary",
diff --git a/flopy/mf6/modflow/mfgwfrch.py b/flopy/mf6/modflow/mfgwfrch.py
index eeac8c5719..699a9ce9ba 100644
--- a/flopy/mf6/modflow/mfgwfrch.py
+++ b/flopy/mf6/modflow/mfgwfrch.py
@@ -1,6 +1,6 @@
 # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
 # mf6/utils/createpackages.py
-# FILE created on June 29, 2023 14:20:38 UTC
+# FILE created on August 29, 2023 20:06:54 UTC

 from .. import mfpackage
 from ..data.mfdatautil import ListTemplateGenerator
@@ -123,10 +123,7 @@ class ModflowGwfrch(mfpackage.MFPackage):
     dfn_file_name = "gwf-rch.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type stress-package"],
         [
             "block options",
             "name fixed_cell",
diff --git a/flopy/mf6/modflow/mfgwfrcha.py b/flopy/mf6/modflow/mfgwfrcha.py
index 35fc60d5fb..1a1a05a894 100644
--- a/flopy/mf6/modflow/mfgwfrcha.py
+++ b/flopy/mf6/modflow/mfgwfrcha.py
@@ -1,6 +1,6 @@
 # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
 # mf6/utils/createpackages.py
-# FILE created on June 29, 2023 14:20:38 UTC
+# FILE created on August 29, 2023 20:06:54 UTC

 from .. import mfpackage
 from ..data.mfdatautil import ArrayTemplateGenerator, ListTemplateGenerator
@@ -116,10 +116,7 @@ class ModflowGwfrcha(mfpackage.MFPackage):
     dfn_file_name = "gwf-rcha.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type stress-package"],
         [
             "block options",
             "name readasarrays",
diff --git a/flopy/mf6/modflow/mfgwfriv.py b/flopy/mf6/modflow/mfgwfriv.py
index 56389249b8..e50cf651f6 100644
--- a/flopy/mf6/modflow/mfgwfriv.py
+++ b/flopy/mf6/modflow/mfgwfriv.py
@@ -1,6 +1,6 @@
 # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
 # mf6/utils/createpackages.py
-# FILE created on June 29, 2023 14:20:38 UTC
+# FILE created on August 29, 2023 20:06:54 UTC

 from .. import mfpackage
 from ..data.mfdatautil import ListTemplateGenerator
@@ -129,10 +129,7 @@ class ModflowGwfriv(mfpackage.MFPackage):
     dfn_file_name = "gwf-riv.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type stress-package"],
         [
             "block options",
             "name auxiliary",
diff --git a/flopy/mf6/modflow/mfgwfsfr.py b/flopy/mf6/modflow/mfgwfsfr.py
index e32fb7f3bd..70b470d143 100644
--- a/flopy/mf6/modflow/mfgwfsfr.py
+++ b/flopy/mf6/modflow/mfgwfsfr.py
@@ -476,10 +476,7 @@ class ModflowGwfsfr(mfpackage.MFPackage):
     dfn_file_name = "gwf-sfr.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type advanced-stress-package"],
         [
             "block options",
             "name auxiliary",
diff --git a/flopy/mf6/modflow/mfgwfuzf.py b/flopy/mf6/modflow/mfgwfuzf.py
index dd845bdaa6..dc31038a18 100644
--- a/flopy/mf6/modflow/mfgwfuzf.py
+++ b/flopy/mf6/modflow/mfgwfuzf.py
@@ -297,10 +297,7 @@ class ModflowGwfuzf(mfpackage.MFPackage):
     dfn_file_name = "gwf-uzf.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type advanced-stress-package"],
         [
             "block options",
             "name auxiliary",
diff --git a/flopy/mf6/modflow/mfgwfwel.py b/flopy/mf6/modflow/mfgwfwel.py
index 6dee2f1742..33160c324f 100644
--- a/flopy/mf6/modflow/mfgwfwel.py
+++ b/flopy/mf6/modflow/mfgwfwel.py
@@ -1,6 +1,6 @@
 # DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY
 # mf6/utils/createpackages.py
-# FILE created on June 29, 2023 14:20:38 UTC
+# FILE created on August 29, 2023 20:06:54 UTC

 from .. import mfpackage
 from ..data.mfdatautil import ListTemplateGenerator
@@ -141,10 +141,7 @@ class ModflowGwfwel(mfpackage.MFPackage):
     dfn_file_name = "gwf-wel.dfn"

     dfn = [
-        [
-            "header",
-            "multi-package",
-        ],
+        ["header", "multi-package", "package-type stress-package"],
         [
             "block options",
             "name auxiliary",
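Every generated stress package above receives the same mechanical dfn change: the header record is collapsed to one line and gains a `package-type` tag, `stress-package` for the boundary packages (CHD, DRN, EVT, EVTA, GHB, RCH, RCHA, RIV, WEL) and `advanced-stress-package` for LAK, MAW, SFR, and UZF. A quick way to confirm the tag on a generated class; `header_tags` is a throwaway helper written for this sketch, not flopy API:

```python
from flopy.mf6.modflow.mfgwfuzf import ModflowGwfuzf
from flopy.mf6.modflow.mfgwfwel import ModflowGwfwel

def header_tags(pkg_class):
    # the first dfn entry is the header record, e.g.
    # ["header", "multi-package", "package-type stress-package"]
    tags = {}
    for entry in pkg_class.dfn[0][1:]:  # skip the literal "header"
        key, _, value = entry.partition(" ")
        tags[key] = value or None
    return tags

print(header_tags(ModflowGwfwel))  # package-type -> stress-package
print(header_tags(ModflowGwfuzf))  # package-type -> advanced-stress-package
```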
diff --git a/flopy/mf6/modflow/mfsimulation.py b/flopy/mf6/modflow/mfsimulation.py
index 28fb749272..0621712c1b 100644
--- a/flopy/mf6/modflow/mfsimulation.py
+++ b/flopy/mf6/modflow/mfsimulation.py
@@ -74,7 +74,8 @@ class MFSimulation(mfsimbase.MFSimulationBase):
     load : (sim_name : str, version : string, exe_name : str or PathLike,
         sim_ws : str or PathLike, strict : bool, verbosity_level : int,
         load_only : list, verify_data : bool,
-        write_headers : bool, lazy_io : bool) : MFSimulation
+        write_headers : bool, lazy_io : bool, use_pandas : bool,
+        ) : MFSimulation
         a class method that loads a simulation from files
     """

@@ -86,6 +87,7 @@ def __init__(
         sim_ws: Union[str, os.PathLike] = os.curdir,
         verbosity_level=1,
         write_headers=True,
+        use_pandas=True,
         lazy_io=False,
         continue_=None,
         nocheck=None,
@@ -101,6 +103,7 @@ def __init__(
             verbosity_level=verbosity_level,
             write_headers=write_headers,
             lazy_io=lazy_io,
+            use_pandas=use_pandas,
         )

         self.name_file.continue_.set_data(continue_)
@@ -128,6 +131,7 @@ def load(
         verify_data=False,
         write_headers=True,
         lazy_io=False,
+        use_pandas=True,
     ):
         return mfsimbase.MFSimulationBase.load(
             cls,
@@ -141,4 +145,5 @@ def load(
             verify_data,
             write_headers,
             lazy_io,
+            use_pandas,
         )
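The generated `MFSimulation` wrapper forwards the new keyword both at construction and on load. A hedged load sketch; the name, workspace, and executable are placeholders:

```python
import flopy

# load an existing simulation with pandas-backed storage disabled,
# falling back to the recarray-based MFList classes
sim = flopy.mf6.MFSimulation.load(
    sim_name="mysim",
    sim_ws="path/to/model",  # placeholder workspace
    exe_name="mf6",
    use_pandas=False,
)
```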
"lazy_io=lazy_io,\n{}" + "use_pandas=use_pandas,\n{}" ")\n".format( sparent_init_string, spaces, @@ -1042,6 +1050,7 @@ def create_packages(): spaces, spaces, spaces, + spaces, ) ) sim_import_string = ( diff --git a/flopy/mf6/utils/model_splitter.py b/flopy/mf6/utils/model_splitter.py index 3f36bc53b0..46ecc4e768 100644 --- a/flopy/mf6/utils/model_splitter.py +++ b/flopy/mf6/utils/model_splitter.py @@ -7,7 +7,7 @@ from ...mf6 import modflow from ...plot import plotutil from ...utils import import_optional_dependency -from ..data import mfdataarray, mfdatalist, mfdatascalar +from ..data import mfdataarray, mfdatalist, mfdataplist, mfdatascalar from ..mfbase import PackageContainer OBS_ID1_LUT = { @@ -2890,12 +2890,47 @@ def _remap_package(self, package, ismvr=False): elif isinstance(value, mfdataarray.MFArray): mapped_data = self._remap_array(item, value, mapped_data) - elif isinstance(value, mfdatalist.MFTransientList): + elif isinstance( + value, + ( + mfdatalist.MFTransientList, + mfdataplist.MFPandasTransientList, + ), + ): + if isinstance(value, mfdataplist.MFPandasTransientList): + list_data = mfdatalist.MFTransientList( + value._simulation_data, + value._model_or_sim, + value.structure, + True, + value.path, + value._data_dimensions.package_dim, + value._package, + value._block, + ) + list_data.set_record(value.get_record()) + value = list_data mapped_data = self._remap_transient_list( item, value, mapped_data ) - elif isinstance(value, mfdatalist.MFList): + elif isinstance( + value, (mfdatalist.MFList, mfdataplist.MFPandasList) + ): + if isinstance(value, mfdataplist.MFPandasList): + list_data = mfdatalist.MFList( + value._simulation_data, + value._model_or_sim, + value.structure, + None, + True, + value.path, + value._data_dimensions.package_dim, + value._package, + value._block, + ) + list_data.set_record(value.get_record()) + value = list_data mapped_data = self._remap_mflist(item, value, mapped_data) elif isinstance(value, mfdatascalar.MFScalar):