diff --git a/autotest/regression/test_lgr.py b/autotest/regression/test_lgr.py
index 2373a3149..acf74ef98 100644
--- a/autotest/regression/test_lgr.py
+++ b/autotest/regression/test_lgr.py
@@ -46,9 +46,9 @@ def test_simplelgr(function_tmpdir, example_data_path):
# get the namefiles of the parent and child
namefiles = lgr.get_namefiles()
- assert (
- len(namefiles) == 2
- ), f"get_namefiles returned {len(namefiles)} items instead of 2"
+ assert len(namefiles) == 2, (
+ f"get_namefiles returned {len(namefiles)} items instead of 2"
+ )
tpth = dirname(namefiles[0])
assert tpth == model_ws2, f"dir path is {tpth} not {model_ws2}"
diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py
index 4c7a9fb7d..9f7d03251 100644
--- a/autotest/regression/test_mf6.py
+++ b/autotest/regression/test_mf6.py
@@ -1010,8 +1010,7 @@ def test_np002(function_tmpdir, example_data_path):
"checker threshold of 1e-06" in summary
)
assert (
- "sto package: specific yield values above "
- "checker threshold of 0.5" in summary
+ "sto package: specific yield values above checker threshold of 0.5" in summary
)
assert "Not a number" in summary
model.remove_package("chd_2")
diff --git a/autotest/regression/test_mfnwt.py b/autotest/regression/test_mfnwt.py
index a7b9fbedc..a4c6ba308 100644
--- a/autotest/regression/test_mfnwt.py
+++ b/autotest/regression/test_mfnwt.py
@@ -137,6 +137,6 @@ def test_run_mfnwt_model(function_tmpdir, namfile):
assert compare_heads(fn0, fn1, outfile=fsum), "head comparison failure"
fsum = function_tmpdir / f"{base_name}.budget.out"
- assert compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- ), "budget comparison failure"
+ assert compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum), (
+ "budget comparison failure"
+ )
diff --git a/autotest/regression/test_modflow.py b/autotest/regression/test_modflow.py
index 9fee80b69..97fefe619 100644
--- a/autotest/regression/test_modflow.py
+++ b/autotest/regression/test_modflow.py
@@ -151,9 +151,9 @@ def test_gage(function_tmpdir, example_data_path):
for f in files:
pth0 = join(ws, f)
pth1 = join(model_ws2, f)
- assert filecmp.cmp(
- pth0, pth1
- ), f'new and original gage file "{f}" are not binary equal.'
+ assert filecmp.cmp(pth0, pth1), (
+ f'new and original gage file "{f}" are not binary equal.'
+ )
__example_data_path = get_example_data_path()
diff --git a/autotest/regression/test_str.py b/autotest/regression/test_str.py
index 50756aef1..7045516e0 100644
--- a/autotest/regression/test_str.py
+++ b/autotest/regression/test_str.py
@@ -117,6 +117,6 @@ def test_str_fixed_free(function_tmpdir, example_data_path):
# compare the fixed and free format head files
fn1 = function_tmpdir / "str.nam"
fn2 = function_tmpdir / "str.nam"
- assert compare_heads(
- fn1, fn2, verbose=True
- ), "fixed and free format input output head files are different"
+ assert compare_heads(fn1, fn2, verbose=True), (
+ "fixed and free format input output head files are different"
+ )
diff --git a/autotest/regression/test_wel.py b/autotest/regression/test_wel.py
index 110bb4148..f8e9a1a00 100644
--- a/autotest/regression/test_wel.py
+++ b/autotest/regression/test_wel.py
@@ -99,6 +99,6 @@ def test_binary_well(function_tmpdir):
assert compare_heads(fn0, fn1, outfile=fsum), "head comparison failure"
fsum = os.path.join(function_tmpdir, f"{os.path.splitext(mfnam)[0]}.budget.out")
- assert compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum
- ), "budget comparison failure"
+ assert compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fsum), (
+ "budget comparison failure"
+ )
diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py
index dd5b8161b..8060183ea 100644
--- a/autotest/test_binaryfile.py
+++ b/autotest/test_binaryfile.py
@@ -431,16 +431,16 @@ def test_binaryfile_read(function_tmpdir, freyberg_model_path):
h0 = h.get_data(totim=times[0])
h1 = h.get_data(kstpkper=kstpkper[0])
h2 = h.get_data(idx=0)
- assert np.array_equal(
- h0, h1
- ), "binary head read using totim != head read using kstpkper"
+ assert np.array_equal(h0, h1), (
+ "binary head read using totim != head read using kstpkper"
+ )
assert np.array_equal(h0, h2), "binary head read using totim != head read using idx"
ts = h.get_ts((0, 7, 5))
expected = 26.00697135925293
- assert np.isclose(
- ts[0, 1], expected
- ), f"time series value ({ts[0, 1]}) != {expected}"
+ assert np.isclose(ts[0, 1], expected), (
+ f"time series value ({ts[0, 1]}) != {expected}"
+ )
h.close()
# Check error when reading empty file
diff --git a/autotest/test_binarygrid_util.py b/autotest/test_binarygrid_util.py
index 4750c3fef..1b231e3c7 100644
--- a/autotest/test_binarygrid_util.py
+++ b/autotest/test_binarygrid_util.py
@@ -37,9 +37,9 @@ def test_mfgrddis_modelgrid(mfgrd_test_path):
assert isinstance(modelgrid, StructuredGrid), "invalid grid type"
lc = modelgrid.plot()
- assert isinstance(
- lc, matplotlib.collections.LineCollection
- ), f"could not plot grid object created from {fn}"
+ assert isinstance(lc, matplotlib.collections.LineCollection), (
+ f"could not plot grid object created from {fn}"
+ )
plt.close()
extents = modelgrid.extent
@@ -54,9 +54,9 @@ def test_mfgrddis_modelgrid(mfgrd_test_path):
maxvertex = max([max(sublist[1:]) for sublist in iverts])
assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}"
verts = modelgrid.verts
- assert (
- nvert == verts.shape[0]
- ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
+ assert nvert == verts.shape[0], (
+ f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
+ )
def test_mfgrddisv_MfGrdFile(mfgrd_test_path):
@@ -87,9 +87,9 @@ def test_mfgrddisv_modelgrid(mfgrd_test_path):
assert mg.ncpl == ncpl, f"ncpl ({mg.ncpl}) does not equal {ncpl}"
lc = mg.plot()
- assert isinstance(
- lc, matplotlib.collections.LineCollection
- ), f"could not plot grid object created from {fn}"
+ assert isinstance(lc, matplotlib.collections.LineCollection), (
+ f"could not plot grid object created from {fn}"
+ )
plt.close("all")
extents = mg.extent
@@ -102,9 +102,9 @@ def test_mfgrddisv_modelgrid(mfgrd_test_path):
maxvertex = max([max(sublist[1:]) for sublist in iverts])
assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}"
verts = mg.verts
- assert (
- nvert == verts.shape[0]
- ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
+ assert nvert == verts.shape[0], (
+ f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
+ )
cellxy = np.column_stack(mg.xyzcellcenters[:2])
errmsg = f"shape of flow.disv centroids {cellxy.shape} not equal to (218, 2)."
@@ -141,9 +141,9 @@ def test_mfgrddisu_modelgrid(mfgrd_test_path):
assert isinstance(mg, UnstructuredGrid), f"invalid grid type ({type(mg)})"
lc = mg.plot()
- assert isinstance(
- lc, matplotlib.collections.LineCollection
- ), f"could not plot grid object created from {fn}"
+ assert isinstance(lc, matplotlib.collections.LineCollection), (
+ f"could not plot grid object created from {fn}"
+ )
plt.close("all")
extents = mg.extent
@@ -156,6 +156,6 @@ def test_mfgrddisu_modelgrid(mfgrd_test_path):
maxvertex = max([max(sublist[1:]) for sublist in iverts])
assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}"
verts = mg.verts
- assert (
- nvert == verts.shape[0]
- ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
+ assert nvert == verts.shape[0], (
+ f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
+ )
diff --git a/autotest/test_cbc_full3D.py b/autotest/test_cbc_full3D.py
index 7183b380a..98a5f1276 100644
--- a/autotest/test_cbc_full3D.py
+++ b/autotest/test_cbc_full3D.py
@@ -73,14 +73,14 @@ def load_mf6(path, ws_out):
def cbc_eval_size(cbcobj, nnodes, shape3d):
cbc_pth = cbcobj.filename
- assert (
- cbcobj.nnodes == nnodes
- ), f"{cbc_pth} nnodes ({cbcobj.nnodes}) does not equal {nnodes}"
+ assert cbcobj.nnodes == nnodes, (
+ f"{cbc_pth} nnodes ({cbcobj.nnodes}) does not equal {nnodes}"
+ )
a = np.squeeze(np.ones(cbcobj.shape, dtype=float))
b = np.squeeze(np.ones(shape3d, dtype=float))
- assert (
- a.shape == b.shape
- ), f"{cbc_pth} shape {cbcobj.shape} does not conform to {shape3d}"
+ assert a.shape == b.shape, (
+ f"{cbc_pth} shape {cbcobj.shape} does not conform to {shape3d}"
+ )
def cbc_eval_data(cbcobj, shape3d):
diff --git a/autotest/test_compare.py b/autotest/test_compare.py
index 3a961243b..ffa3aaae5 100644
--- a/autotest/test_compare.py
+++ b/autotest/test_compare.py
@@ -138,15 +138,15 @@ def test_compare_budget_and_heads(comparison_model_1):
assert success, "could not run the new MODFLOW-2005 model"
# compare the files
- assert compare_heads(
- fn0, fn1, outfile=fhsum
- ), "head comparison failure (pathlib.Path)"
- assert compare_heads(
- str(fn0), str(fn1), outfile=fhsum
- ), "head comparison failure (str path)"
- assert compare_budget(
- fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fbsum
- ), "budget comparison failure (pathlib.Path)"
+ assert compare_heads(fn0, fn1, outfile=fhsum), (
+ "head comparison failure (pathlib.Path)"
+ )
+ assert compare_heads(str(fn0), str(fn1), outfile=fhsum), (
+ "head comparison failure (str path)"
+ )
+ assert compare_budget(fn0, fn1, max_incpd=0.1, max_cumpd=0.1, outfile=fbsum), (
+ "budget comparison failure (pathlib.Path)"
+ )
assert compare_budget(
str(fn0), str(fn1), max_incpd=0.1, max_cumpd=0.1, outfile=str(fbsum)
), "budget comparison failure (str path)"
diff --git a/autotest/test_export.py b/autotest/test_export.py
index bd7c091b9..97514e12e 100644
--- a/autotest/test_export.py
+++ b/autotest/test_export.py
@@ -494,9 +494,9 @@ def test_shapefile(function_tmpdir, namfile):
fnc = model.export(fnc_name)
s = Reader(fnc_name)
- assert (
- s.numRecords == model.nrow * model.ncol
- ), f"wrong number of records in shapefile {fnc_name}"
+ assert s.numRecords == model.nrow * model.ncol, (
+ f"wrong number of records in shapefile {fnc_name}"
+ )
@requires_pkg("pyshp", name_map={"pyshp": "shapefile"})
@@ -896,9 +896,9 @@ def test_export_mf6_shp(function_tmpdir):
riv6spdarrays = dict(riv6.stress_period_data.masked_4D_arrays_itr())
rivspdarrays = dict(riv.stress_period_data.masked_4D_arrays_itr())
for k, v in rivspdarrays.items():
- assert (
- np.abs(np.nansum(v) - np.nansum(riv6spdarrays[k])) < 1e-6
- ), f"variable {k} is not equal"
+ assert np.abs(np.nansum(v) - np.nansum(riv6spdarrays[k])) < 1e-6, (
+ f"variable {k} is not equal"
+ )
pass
m.export(function_tmpdir / "mfnwt.shp")
@@ -1579,9 +1579,9 @@ def test_vtk_vertex(function_tmpdir, example_data_path):
hk = gwf.npf.k.array
hk[gwf.modelgrid.idomain == 0] = np.nan
- assert np.allclose(
- np.ravel(hk), hk2, equal_nan=True
- ), "Field data not properly written"
+ assert np.allclose(np.ravel(hk), hk2, equal_nan=True), (
+ "Field data not properly written"
+ )
@requires_exe("mf2005")
@@ -1652,9 +1652,9 @@ def test_vtk_pathline(function_tmpdir, example_data_path):
assert len(totim) == 12054, "Array size is incorrect"
assert np.abs(np.max(totim) - maxtime) < 100, "time values are incorrect"
- assert len(np.unique(pid)) == len(
- plines
- ), "number of particles are incorrect for modpath VTK"
+ assert len(np.unique(pid)) == len(plines), (
+ "number of particles are incorrect for modpath VTK"
+ )
def grid2disvgrid(nrow, ncol):
diff --git a/autotest/test_formattedfile.py b/autotest/test_formattedfile.py
index a7146398d..0d31ed308 100644
--- a/autotest/test_formattedfile.py
+++ b/autotest/test_formattedfile.py
@@ -95,18 +95,18 @@ def test_formattedfile_read(function_tmpdir, example_data_path):
h0 = h.get_data(totim=times[0])
h1 = h.get_data(kstpkper=kstpkper[0])
h2 = h.get_data(idx=0)
- assert np.array_equal(
- h0, h1
- ), "formatted head read using totim != head read using kstpkper"
- assert np.array_equal(
- h0, h2
- ), "formatted head read using totim != head read using idx"
+ assert np.array_equal(h0, h1), (
+ "formatted head read using totim != head read using kstpkper"
+ )
+ assert np.array_equal(h0, h2), (
+ "formatted head read using totim != head read using idx"
+ )
ts = h.get_ts((0, 7, 5))
expected = 944.487
- assert np.isclose(
- ts[0, 1], expected, 1e-6
- ), f"time series value ({ts[0, 1]}) != {expected}"
+ assert np.isclose(ts[0, 1], expected, 1e-6), (
+ f"time series value ({ts[0, 1]}) != {expected}"
+ )
h.close()
# Check error when reading empty file
diff --git a/autotest/test_grid.py b/autotest/test_grid.py
index c57ca2125..a60518050 100644
--- a/autotest/test_grid.py
+++ b/autotest/test_grid.py
@@ -426,9 +426,9 @@ def test_structured_from_gridspec(example_data_path, spc_file):
assert isinstance(modelgrid, StructuredGrid)
lc = modelgrid.plot()
- assert isinstance(
- lc, matplotlib.collections.LineCollection
- ), f"could not plot grid object created from {fn}"
+ assert isinstance(lc, matplotlib.collections.LineCollection), (
+ f"could not plot grid object created from {fn}"
+ )
plt.close()
extents = modelgrid.extent
@@ -442,9 +442,9 @@ def test_structured_from_gridspec(example_data_path, spc_file):
8000 + 8000 * np.sin(theta), # ymax
)
errmsg = f"extents {extents} of {fn} does not equal {rotated_extents}"
- assert all(
- np.isclose(x, x0) for x, x0 in zip(modelgrid.extent, rotated_extents)
- ), errmsg
+ assert all(np.isclose(x, x0) for x, x0 in zip(modelgrid.extent, rotated_extents)), (
+ errmsg
+ )
ncpl = modelgrid.ncol * modelgrid.nrow
assert modelgrid.ncpl == ncpl, f"ncpl ({modelgrid.ncpl}) does not equal {ncpl}"
@@ -454,9 +454,9 @@ def test_structured_from_gridspec(example_data_path, spc_file):
maxvertex = max([max(sublist[1:]) for sublist in iverts])
assert maxvertex + 1 == nvert, f"nvert ({maxvertex + 1}) does not equal {nvert}"
verts = modelgrid.verts
- assert (
- nvert == verts.shape[0]
- ), f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
+ assert nvert == verts.shape[0], (
+ f"number of vertex (x, y) pairs ({verts.shape[0]}) does not equal {nvert}"
+ )
@requires_pkg("shapely")
@@ -1149,9 +1149,9 @@ def test_voronoi_grid(request, function_tmpdir, grid_info):
plt.savefig(function_tmpdir / f"{name}.png")
assert ncpl == gridprops["ncpl"] or almost_right
- assert (
- len(invalid_cells) == 0
- ), f"The following cells do not have 3 or more vertices.\n{invalid_cells}"
+ assert len(invalid_cells) == 0, (
+ f"The following cells do not have 3 or more vertices.\n{invalid_cells}"
+ )
@pytest.fixture
@@ -1257,21 +1257,21 @@ def test_structured_ncb_thickness():
grid = GridCases.structured_cbd_small()
thickness = grid.cell_thickness
- assert thickness.shape[0] == grid.nlay + np.count_nonzero(
- grid.laycbd
- ), "grid cell_thickness attribute returns incorrect shape"
+ assert thickness.shape[0] == grid.nlay + np.count_nonzero(grid.laycbd), (
+ "grid cell_thickness attribute returns incorrect shape"
+ )
thickness = grid.remove_confining_beds(grid.cell_thickness)
assert thickness.shape == grid.shape, "quasi3d confining beds not properly removed"
sat_thick = grid.saturated_thickness(grid.cell_thickness)
- assert (
- sat_thick.shape == grid.shape
- ), "saturated_thickness confining beds not removed"
+ assert sat_thick.shape == grid.shape, (
+ "saturated_thickness confining beds not removed"
+ )
- assert (
- sat_thick[1, 0, 0] == 20
- ), "saturated_thickness is not properly indexing confining beds"
+ assert sat_thick[1, 0, 0] == 20, (
+ "saturated_thickness is not properly indexing confining beds"
+ )
@pytest.mark.parametrize(
@@ -1279,9 +1279,9 @@ def test_structured_ncb_thickness():
)
def test_unstructured_iverts(grid):
iverts = grid.iverts
- assert not any(
- None in l for l in iverts
- ), "None type should not be returned in iverts list"
+ assert not any(None in l for l in iverts), (
+ "None type should not be returned in iverts list"
+ )
@pytest.mark.parametrize(
diff --git a/autotest/test_headufile.py b/autotest/test_headufile.py
index e8dd08942..837162fce 100644
--- a/autotest/test_headufile.py
+++ b/autotest/test_headufile.py
@@ -105,9 +105,9 @@ def test_get_ts_multiple_nodes(mfusg_model):
multi_hds = head_file.get_ts(idx=nodes)
for i, node in enumerate(nodes):
layer, nn = get_lni(grid.ncpl, [node])[0]
- assert (
- multi_hds[0, i + 1] == head[layer][nn]
- ), "head from 'get_ts' != head from 'get_data'"
+ assert multi_hds[0, i + 1] == head[layer][nn], (
+ "head from 'get_ts' != head from 'get_data'"
+ )
@requires_exe("mfusg", "gridgen")
@@ -122,9 +122,9 @@ def test_get_ts_all_nodes(mfusg_model):
multi_hds = head_file.get_ts(idx=nodes)
for node in nodes:
layer, nn = get_lni(grid.ncpl, [node])[0]
- assert (
- multi_hds[0, node + 1] == head[layer][nn]
- ), "head from 'get_ts' != head from 'get_data'"
+ assert multi_hds[0, node + 1] == head[layer][nn], (
+ "head from 'get_ts' != head from 'get_data'"
+ )
@requires_exe("mfusg", "gridgen")
diff --git a/autotest/test_hydmodfile.py b/autotest/test_hydmodfile.py
index e3592ac8f..f4d3eb4a9 100644
--- a/autotest/test_hydmodfile.py
+++ b/autotest/test_hydmodfile.py
@@ -26,9 +26,9 @@ def test_hydmodfile_create(function_tmpdir):
m.hyd.write_file()
pth = function_tmpdir / "test.hyd"
hydload = ModflowHyd.load(pth, m)
- assert np.array_equal(
- hyd.obsdata, hydload.obsdata
- ), "Written hydmod data not equal to loaded hydmod data"
+ assert np.array_equal(hyd.obsdata, hydload.obsdata), (
+ "Written hydmod data not equal to loaded hydmod data"
+ )
# test obsdata as recarray
obsdata = np.array(
@@ -64,9 +64,9 @@ def test_hydmodfile_load(function_tmpdir, hydmod_model_path):
pth = hydmod_model_path / "test1tr.hyd"
hydload = ModflowHyd.load(pth, m)
- assert np.array_equal(
- hydref.obsdata, hydload.obsdata
- ), "Written hydmod data not equal to loaded hydmod data"
+ assert np.array_equal(hydref.obsdata, hydload.obsdata), (
+ "Written hydmod data not equal to loaded hydmod data"
+ )
def test_hydmodfile_read(hydmod_model_path):
@@ -101,9 +101,9 @@ def test_hydmodfile_read(hydmod_model_path):
data = h.get_data()
assert data.shape == (len(times),), f"data shape is not ({len(times)},)"
- assert (
- len(data.dtype.names) == nitems + 1
- ), f"data column length is not {len(nitems + 1)}"
+ assert len(data.dtype.names) == nitems + 1, (
+ f"data column length is not {len(nitems + 1)}"
+ )
for idx in range(ntimes):
df = h.get_dataframe(idx=idx, timeunit="S")
@@ -163,9 +163,9 @@ def test_mf6obsfile_read(mf6_obs_model_path):
data = h.get_data()
assert data.shape == (len(times),), f"data shape is not ({len(times)},)"
- assert (
- len(data.dtype.names) == nitems + 1
- ), f"data column length is not {len(nitems + 1)}"
+ assert len(data.dtype.names) == nitems + 1, (
+ f"data column length is not {len(nitems + 1)}"
+ )
for idx in range(ntimes):
df = h.get_dataframe(idx=idx, timeunit="S")
diff --git a/autotest/test_lake_connections.py b/autotest/test_lake_connections.py
index f74de60af..dfbe1332c 100644
--- a/autotest/test_lake_connections.py
+++ b/autotest/test_lake_connections.py
@@ -244,14 +244,14 @@ def test_lake(function_tmpdir, example_data_path):
bedleak=5e-9,
)
- assert (
- pakdata_dict[0] == 54
- ), f"number of lake connections ({pakdata_dict[0]}) not equal to 54."
+ assert pakdata_dict[0] == 54, (
+ f"number of lake connections ({pakdata_dict[0]}) not equal to 54."
+ )
- assert (
- len(connectiondata) == 54
- ), "number of lake connectiondata entries ({}) not equal to 54.".format(
- len(connectiondata)
+ assert len(connectiondata) == 54, (
+ "number of lake connectiondata entries ({}) not equal to 54.".format(
+ len(connectiondata)
+ )
)
lak_pak_data = []
@@ -433,14 +433,14 @@ def test_embedded_lak_ex01(function_tmpdir, example_data_path):
bedleak=0.1,
)
- assert (
- pakdata_dict[0] == 57
- ), f"number of lake connections ({pakdata_dict[0]}) not equal to 57."
+ assert pakdata_dict[0] == 57, (
+ f"number of lake connections ({pakdata_dict[0]}) not equal to 57."
+ )
- assert (
- len(connectiondata) == 57
- ), "number of lake connectiondata entries ({}) not equal to 57.".format(
- len(connectiondata)
+ assert len(connectiondata) == 57, (
+ "number of lake connectiondata entries ({}) not equal to 57.".format(
+ len(connectiondata)
+ )
)
lak_pak_data = []
@@ -554,9 +554,9 @@ def test_embedded_lak_prudic(example_data_path):
idomain0_test[lakibd > 0] = 0
idomain_test = idomain.copy()
idomain[0, :, :] = idomain0_test
- assert np.array_equal(
- idomain_rev, idomain_test
- ), "idomain not updated correctly with lakibd"
+ assert np.array_equal(idomain_rev, idomain_test), (
+ "idomain not updated correctly with lakibd"
+ )
@requires_exe("mf6")
diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 7ea5ab6b0..7fdd34f4c 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -479,9 +479,9 @@ def test_subdir(function_tmpdir):
sim_ws=sim.simulation_data.mfpath.get_sim_path(),
)
gwf_r = sim_r.get_model()
- assert (
- gwf.dis.delc.get_file_entry() == gwf_r.dis.delc.get_file_entry()
- ), "Something wrong with model external paths"
+ assert gwf.dis.delc.get_file_entry() == gwf_r.dis.delc.get_file_entry(), (
+ "Something wrong with model external paths"
+ )
sim_r.set_all_data_internal()
sim_r.set_all_data_external(external_data_folder=os.path.join("dat", "dat_l2"))
@@ -492,9 +492,9 @@ def test_subdir(function_tmpdir):
sim_ws=sim_r.simulation_data.mfpath.get_sim_path(),
)
gwf_r2 = sim_r.get_model()
- assert (
- gwf_r.dis.delc.get_file_entry() == gwf_r2.dis.delc.get_file_entry()
- ), "Something wrong with model external paths"
+ assert gwf_r.dis.delc.get_file_entry() == gwf_r2.dis.delc.get_file_entry(), (
+ "Something wrong with model external paths"
+ )
@requires_exe("mf6")
@@ -516,7 +516,7 @@ def test_binary_write(function_tmpdir, layered):
idomain_data.append(
{
"factor": 1.0,
- "filename": f"idomain_l{k+1}.bin",
+ "filename": f"idomain_l{k + 1}.bin",
"data": 1,
"binary": True,
"iprn": 1,
@@ -524,7 +524,7 @@ def test_binary_write(function_tmpdir, layered):
)
botm_data.append(
{
- "filename": f"botm_l{k+1}.bin",
+ "filename": f"botm_l{k + 1}.bin",
"binary": True,
"iprn": 1,
"data": np.full(shape2d, botm[k], dtype=float),
@@ -532,7 +532,7 @@ def test_binary_write(function_tmpdir, layered):
)
strt_data.append(
{
- "filename": f"strt_l{k+1}.bin",
+ "filename": f"strt_l{k + 1}.bin",
"binary": True,
"iprn": 1,
"data": np.full(shape2d, strt[k], dtype=float),
@@ -1538,9 +1538,9 @@ def test_output_add_observation(function_tmpdir, example_data_path):
# check that .output finds the newly added OBS package
sfr_obs = gwf.sfr.output.obs()
- assert isinstance(
- sfr_obs, Mf6Obs
- ), "remove and add observation test (Mf6Output) failed"
+ assert isinstance(sfr_obs, Mf6Obs), (
+ "remove and add observation test (Mf6Output) failed"
+ )
@requires_exe("mf6")
diff --git a/autotest/test_model_dot_plot.py b/autotest/test_model_dot_plot.py
index f4cfb377a..a19af34e5 100644
--- a/autotest/test_model_dot_plot.py
+++ b/autotest/test_model_dot_plot.py
@@ -49,9 +49,9 @@ def test_dataset_dot_plot_nlay_ne_plottable(function_tmpdir, example_data_path):
ml = Modflow.load("ibs2k.nam", "mf2k", model_ws=loadpth, check=False)
# special case where nlay != plottable
ax = ml.bcf6.vcont.plot()
- assert isinstance(
- ax, plt.Axes
- ), "ml.bcf6.vcont.plot() ax is is not of type plt.Axes"
+ assert isinstance(ax, plt.Axes), (
+ "ml.bcf6.vcont.plot() ax is is not of type plt.Axes"
+ )
def test_model_dot_plot_export(function_tmpdir, example_data_path):
diff --git a/autotest/test_model_splitter.py b/autotest/test_model_splitter.py
index 09de92752..d7e6de922 100644
--- a/autotest/test_model_splitter.py
+++ b/autotest/test_model_splitter.py
@@ -460,19 +460,19 @@ def test_empty_packages(function_tmpdir):
m0 = new_sim.get_model(f"{base_name}_0")
m1 = new_sim.get_model(f"{base_name}_1")
- assert not m0.get_package(
- name="chd_0"
- ), f"Empty CHD file written to {base_name}_0 model"
- assert not m1.get_package(
- name="wel_0"
- ), f"Empty WEL file written to {base_name}_1 model"
+ assert not m0.get_package(name="chd_0"), (
+ f"Empty CHD file written to {base_name}_0 model"
+ )
+ assert not m1.get_package(name="wel_0"), (
+ f"Empty WEL file written to {base_name}_1 model"
+ )
mvr_status0 = m0.sfr.mover.array
mvr_status1 = m0.sfr.mover.array
- assert (
- mvr_status0 and mvr_status1
- ), "Mover status being overwritten in options splitting"
+ assert mvr_status0 and mvr_status1, (
+ "Mover status being overwritten in options splitting"
+ )
@requires_exe("mf6")
@@ -1295,7 +1295,7 @@ def build_gwt_model(sim, gwtname, rch_package):
if diff > 10.25:
raise AssertionError(
f"Difference between output arrays: "
- f"{diff :.2f} greater than tolerance"
+ f"{diff:.2f} greater than tolerance"
)
diff --git a/autotest/test_modflow.py b/autotest/test_modflow.py
index 304b4f6f9..47c883013 100644
--- a/autotest/test_modflow.py
+++ b/autotest/test_modflow.py
@@ -357,9 +357,9 @@ def test_mf6_update_grid(example_data_path):
mg = gwf.modelgrid
gwf.dis.top = 12
- assert np.allclose(
- gwf.dis.top.array, gwf.modelgrid.top
- ), "StructuredGrid failed dynamic update test"
+ assert np.allclose(gwf.dis.top.array, gwf.modelgrid.top), (
+ "StructuredGrid failed dynamic update test"
+ )
# disv
ml_path = example_data_path / "mf6" / "test003_gwfs_disv"
@@ -368,9 +368,9 @@ def test_mf6_update_grid(example_data_path):
mg = gwf.modelgrid
gwf.disv.top = 6.12
- assert np.allclose(
- gwf.disv.top.array, gwf.modelgrid.top
- ), "VertexGrid failed dynamic update test"
+ assert np.allclose(gwf.disv.top.array, gwf.modelgrid.top), (
+ "VertexGrid failed dynamic update test"
+ )
# disu
ml_path = example_data_path / "mf6" / "test006_gwf3"
@@ -379,9 +379,9 @@ def test_mf6_update_grid(example_data_path):
mg = gwf.modelgrid
gwf.disu.top = 101
- assert np.allclose(
- gwf.disu.top.array, gwf.modelgrid.top
- ), "UnstructuredGrid failed dynamic update test"
+ assert np.allclose(gwf.disu.top.array, gwf.modelgrid.top), (
+ "UnstructuredGrid failed dynamic update test"
+ )
def test_load_twri_grid(example_data_path):
@@ -394,9 +394,9 @@ def test_load_twri_grid(example_data_path):
assert mg.shape == shape, f"modelgrid shape {mg.shape} not equal to {shape}"
thickness = mg.cell_thickness
shape = (5, 15, 15)
- assert (
- thickness.shape == shape
- ), f"cell_thickness shape {thickness.shape} not equal to {shape}"
+ assert thickness.shape == shape, (
+ f"cell_thickness shape {thickness.shape} not equal to {shape}"
+ )
def test_mg(function_tmpdir):
diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py
index 27f745633..e17a4ab95 100644
--- a/autotest/test_mp6.py
+++ b/autotest/test_mp6.py
@@ -451,15 +451,15 @@ def test_mp6_timeseries_load(example_data_path):
def eval_timeseries(file):
ts = TimeseriesFile(file)
- assert isinstance(
- ts, TimeseriesFile
- ), f"{os.path.basename(file)} is not an instance of TimeseriesFile"
+ assert isinstance(ts, TimeseriesFile), (
+ f"{os.path.basename(file)} is not an instance of TimeseriesFile"
+ )
# get the all of the data
tsd = ts.get_alldata()
- assert (
- len(tsd) > 0
- ), f"could not load data using get_alldata() from {os.path.basename(file)}."
+ assert len(tsd) > 0, (
+ f"could not load data using get_alldata() from {os.path.basename(file)}."
+ )
# get the data for the last particleid
partid = ts.get_maxid()
@@ -476,8 +476,7 @@ def eval_timeseries(file):
timemax = ts.get_maxtime() / 2.0
assert timemax is not None, (
- "could not get maximum time using get_maxtime() from "
- f"{os.path.basename(file)}."
+ f"could not get maximum time using get_maxtime() from {os.path.basename(file)}."
)
tsd = ts.get_alldata(totim=timemax)
@@ -488,8 +487,7 @@ def eval_timeseries(file):
timemax = ts.get_maxtime()
assert timemax is not None, (
- "could not get maximum time using get_maxtime() from "
- f"{os.path.basename(file)}."
+ f"could not get maximum time using get_maxtime() from {os.path.basename(file)}."
)
tsd = ts.get_alldata(totim=timemax, ge=False)
diff --git a/autotest/test_nwt_ag.py b/autotest/test_nwt_ag.py
index 8a76aa08e..67bc10f7f 100644
--- a/autotest/test_nwt_ag.py
+++ b/autotest/test_nwt_ag.py
@@ -55,12 +55,12 @@ def test_load_write_agwater_uzf(function_tmpdir, example_data_path):
dis2 = ModflowDis(ml2, nlay=1, nrow=15, ncol=10, nper=49)
uzf2 = ModflowUzf1.load(function_tmpdir / uzffile, ml2)
- assert np.allclose(
- uzf1.air_entry.array, uzf2.air_entry.array
- ), "Air entry pressure array comparison failed"
- assert np.allclose(
- uzf1.hroot.array, uzf2.hroot.array
- ), "root pressure array comparison failed"
- assert np.allclose(
- uzf1.rootact.array, uzf2.rootact.array
- ), "root activity array comparison failed"
+ assert np.allclose(uzf1.air_entry.array, uzf2.air_entry.array), (
+ "Air entry pressure array comparison failed"
+ )
+ assert np.allclose(uzf1.hroot.array, uzf2.hroot.array), (
+ "root pressure array comparison failed"
+ )
+ assert np.allclose(uzf1.rootact.array, uzf2.rootact.array), (
+ "root activity array comparison failed"
+ )
diff --git a/autotest/test_plot_cross_section.py b/autotest/test_plot_cross_section.py
index c478b6374..9ab96480b 100644
--- a/autotest/test_plot_cross_section.py
+++ b/autotest/test_plot_cross_section.py
@@ -21,9 +21,9 @@ def test_cross_section_bc_gwfs_disv(example_data_path):
assert len(ax.collections) != 0, "Boundary condition was not drawn"
for col in ax.collections:
- assert isinstance(
- col, PatchCollection
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, PatchCollection), (
+ f"Unexpected collection type: {type(col)}"
+ )
@pytest.mark.mf6
@@ -40,9 +40,9 @@ def test_cross_section_bc_lake2tr(example_data_path):
assert len(ax.collections) != 0, "Boundary condition was not drawn"
for col in ax.collections:
- assert isinstance(
- col, PatchCollection
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, PatchCollection), (
+ f"Unexpected collection type: {type(col)}"
+ )
@pytest.mark.mf6
@@ -58,9 +58,9 @@ def test_cross_section_bc_2models_mvr(example_data_path):
assert len(ax.collections) > 0, "Boundary condition was not drawn"
for col in ax.collections:
- assert isinstance(
- col, PatchCollection
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, PatchCollection), (
+ f"Unexpected collection type: {type(col)}"
+ )
@pytest.mark.mf6
@@ -77,9 +77,9 @@ def test_cross_section_bc_UZF_3lay(example_data_path):
assert len(ax.collections) != 0, "Boundary condition was not drawn"
for col in ax.collections:
- assert isinstance(
- col, PatchCollection
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, PatchCollection), (
+ f"Unexpected collection type: {type(col)}"
+ )
def structured_square_grid(side: int = 10, thick: int = 10):
diff --git a/autotest/test_plot_map_view.py b/autotest/test_plot_map_view.py
index bf6befb88..641fa5817 100644
--- a/autotest/test_plot_map_view.py
+++ b/autotest/test_plot_map_view.py
@@ -96,9 +96,9 @@ def test_map_view_bc_gwfs_disv(example_data_path):
raise AssertionError("Boundary condition was not drawn")
for col in ax.collections:
- assert isinstance(
- col, (QuadMesh, PathCollection)
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, (QuadMesh, PathCollection)), (
+ f"Unexpected collection type: {type(col)}"
+ )
@pytest.mark.mf6
@@ -116,9 +116,9 @@ def test_map_view_bc_lake2tr(example_data_path):
raise AssertionError("Boundary condition was not drawn")
for col in ax.collections:
- assert isinstance(
- col, (QuadMesh, PathCollection)
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, (QuadMesh, PathCollection)), (
+ f"Unexpected collection type: {type(col)}"
+ )
@pytest.mark.mf6
@@ -143,9 +143,9 @@ def test_map_view_bc_2models_mvr(example_data_path):
assert len(ax.collections) > 0, "Boundary condition was not drawn"
for col in ax.collections:
- assert isinstance(
- col, (QuadMesh, PathCollection)
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, (QuadMesh, PathCollection)), (
+ f"Unexpected collection type: {type(col)}"
+ )
@pytest.mark.mf6
@@ -163,9 +163,9 @@ def test_map_view_bc_UZF_3lay(example_data_path):
raise AssertionError("Boundary condition was not drawn")
for col in ax.collections:
- assert isinstance(
- col, (QuadMesh, PathCollection)
- ), f"Unexpected collection type: {type(col)}"
+ assert isinstance(col, (QuadMesh, PathCollection)), (
+ f"Unexpected collection type: {type(col)}"
+ )
@pytest.mark.parametrize("ndim", [1, 2, 3])
diff --git a/autotest/test_postprocessing.py b/autotest/test_postprocessing.py
index fc9bef359..15cb602f5 100644
--- a/autotest/test_postprocessing.py
+++ b/autotest/test_postprocessing.py
@@ -399,13 +399,13 @@ def test_flowja_residuals(function_tmpdir, mf6_freyberg_path):
ax = fig.add_subplot(1, 1, 1, aspect="equal")
mm = PlotMapView(model=gwf, ax=ax)
r0 = mm.plot_array(residual)
- assert isinstance(
- r0, matplotlib.collections.QuadMesh
- ), "r0 not type matplotlib.collections.QuadMesh"
+ assert isinstance(r0, matplotlib.collections.QuadMesh), (
+ "r0 not type matplotlib.collections.QuadMesh"
+ )
q0 = mm.plot_vector(qx, qy)
- assert isinstance(
- q0, matplotlib.quiver.Quiver
- ), "q0 not type matplotlib.quiver.Quiver"
+ assert isinstance(q0, matplotlib.quiver.Quiver), (
+ "q0 not type matplotlib.quiver.Quiver"
+ )
mm.plot_grid(lw=0.5, color="black")
mm.plot_ibound()
plt.colorbar(r0, shrink=0.5)
@@ -555,6 +555,6 @@ def test_get_sat_thickness_gradients(function_tmpdir):
assert np.nansum(np.abs(dh / dz - grad[:, 1, 0])) < 1e-6
sat_thick = m.modelgrid.saturated_thickness(hds, mask=nodata)
- assert (
- np.abs(np.sum(sat_thick[:, 1, 1] - np.array([0.2, 1.0, 1.0]))) < 1e-6
- ), "failed saturated thickness comparison (grid.thick())"
+ assert np.abs(np.sum(sat_thick[:, 1, 1] - np.array([0.2, 1.0, 1.0]))) < 1e-6, (
+ "failed saturated thickness comparison (grid.thick())"
+ )
diff --git a/autotest/test_swr_binaryread.py b/autotest/test_swr_binaryread.py
index 202219324..8ebb3ed4d 100644
--- a/autotest/test_swr_binaryread.py
+++ b/autotest/test_swr_binaryread.py
@@ -44,9 +44,9 @@ def test_swr_binary_stage(swr_test_path, ipos):
r = sobj.get_data(idx=idx)
assert r is not None, "SwrStage could not read data with get_data(idx=)"
assert r.shape == (18,), "SwrStage stage data shape does not equal (18,)"
- assert (
- len(r.dtype.names) == 2
- ), "SwrStage stage data dtype does not have 2 entries"
+ assert len(r.dtype.names) == 2, (
+ "SwrStage stage data dtype does not have 2 entries"
+ )
kswrkstpkper = sobj.get_kswrkstpkper()
assert kswrkstpkper.shape == (
@@ -56,13 +56,13 @@ def test_swr_binary_stage(swr_test_path, ipos):
for kkk in kswrkstpkper:
r = sobj.get_data(kswrkstpkper=kkk)
- assert (
- r is not None
- ), "SwrStage could not read data with get_data(kswrkstpkper=)"
+ assert r is not None, (
+ "SwrStage could not read data with get_data(kswrkstpkper=)"
+ )
assert r.shape == (18,), "SwrStage stage data shape does not equal (18,)"
- assert (
- len(r.dtype.names) == 2
- ), "SwrStage stage data dtype does not have 2 entries"
+ assert len(r.dtype.names) == 2, (
+ "SwrStage stage data dtype does not have 2 entries"
+ )
times = sobj.get_times()
assert len(times) == 336, "SwrStage times length does not equal 336"
@@ -71,15 +71,15 @@ def test_swr_binary_stage(swr_test_path, ipos):
r = sobj.get_data(totim=time)
assert r is not None, "SwrStage could not read data with get_data(tottim=)"
assert r.shape == (18,), "SwrStage stage data shape does not equal (18,)"
- assert (
- len(r.dtype.names) == 2
- ), "SwrStage stage data dtype does not have 2 entries"
+ assert len(r.dtype.names) == 2, (
+ "SwrStage stage data dtype does not have 2 entries"
+ )
ts = sobj.get_ts(irec=17)
assert ts.shape == (336,), "SwrStage stage timeseries shape does not equal (336,)"
- assert (
- len(ts.dtype.names) == 2
- ), "SwrStage stage time series stage data dtype does not have 2 entries"
+ assert len(ts.dtype.names) == 2, (
+ "SwrStage stage time series stage data dtype does not have 2 entries"
+ )
# plt.plot(ts['totim'], ts['stage'])
# plt.show()
@@ -114,13 +114,13 @@ def test_swr_binary_budget(swr_test_path, ipos):
for kkk in kswrkstpkper:
r = sobj.get_data(kswrkstpkper=kkk)
- assert (
- r is not None
- ), "SwrBudget could not read data with get_data(kswrkstpkper=)"
+ assert r is not None, (
+ "SwrBudget could not read data with get_data(kswrkstpkper=)"
+ )
assert r.shape == (18,), "SwrBudget budget data shape does not equal (18,)"
- assert (
- len(r.dtype.names) == 15
- ), "SwrBudget budget data dtype does not have 15 entries"
+ assert len(r.dtype.names) == 15, (
+ "SwrBudget budget data dtype does not have 15 entries"
+ )
times = sobj.get_times()
assert len(times) == 336, "SwrBudget times length does not equal 336"
@@ -129,15 +129,15 @@ def test_swr_binary_budget(swr_test_path, ipos):
r = sobj.get_data(totim=time)
assert r is not None, "SwrBudget could not read data with get_data(tottim=)"
assert r.shape == (18,), "SwrBudget budget data shape does not equal (18,)"
- assert (
- len(r.dtype.names) == 15
- ), "SwrBudget budget data dtype does not have 15 entries"
+ assert len(r.dtype.names) == 15, (
+ "SwrBudget budget data dtype does not have 15 entries"
+ )
ts = sobj.get_ts(irec=17)
assert ts.shape == (336,), "SwrBudget budget timeseries shape does not equal (336,)"
- assert (
- len(ts.dtype.names) == 15
- ), "SwrBudget time series budget data dtype does not have 15 entries"
+ assert len(ts.dtype.names) == 15, (
+ "SwrBudget time series budget data dtype does not have 15 entries"
+ )
# plt.plot(ts['totim'], ts['qbcflow'])
# plt.show()
@@ -190,15 +190,15 @@ def test_swr_binary_qm(swr_test_path, ipos):
ts = sobj.get_ts(irec=17, iconn=16)
assert ts.shape == (336,), "SwrFlow qm timeseries shape does not equal (336,)"
- assert (
- len(ts.dtype.names) == 3
- ), "SwrFlow time series qm data dtype does not have 3 entries"
+ assert len(ts.dtype.names) == 3, (
+ "SwrFlow time series qm data dtype does not have 3 entries"
+ )
ts2 = sobj.get_ts(irec=16, iconn=17)
assert ts2.shape == (336,), "SwrFlow qm timeseries shape does not equal (336,)"
- assert (
- len(ts2.dtype.names) == 3
- ), "SwrFlow time series qm data dtype does not have 3 entries"
+ assert len(ts2.dtype.names) == 3, (
+ "SwrFlow time series qm data dtype does not have 3 entries"
+ )
# plt.plot(ts['totim'], ts['velocity'])
# plt.plot(ts2['totim'], ts2['velocity'])
@@ -221,9 +221,9 @@ def test_swr_binary_qaq(swr_test_path, ipos):
r = sobj.get_data(idx=idx)
assert r is not None, "SwrExchange could not read data with get_data(idx=)"
assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)"
- assert (
- len(r.dtype.names) == 11
- ), "SwrExchange qaq data dtype does not have 11 entries"
+ assert len(r.dtype.names) == 11, (
+ "SwrExchange qaq data dtype does not have 11 entries"
+ )
# plt.bar(range(21), r['qaq'])
# plt.show()
@@ -236,13 +236,13 @@ def test_swr_binary_qaq(swr_test_path, ipos):
for kkk in kswrkstpkper:
r = sobj.get_data(kswrkstpkper=kkk)
- assert (
- r is not None
- ), "SwrExchange could not read data with get_data(kswrkstpkper=)"
+ assert r is not None, (
+ "SwrExchange could not read data with get_data(kswrkstpkper=)"
+ )
assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)"
- assert (
- len(r.dtype.names) == 11
- ), "SwrExchange qaq data dtype does not have 11 entries"
+ assert len(r.dtype.names) == 11, (
+ "SwrExchange qaq data dtype does not have 11 entries"
+ )
times = sobj.get_times()
assert len(times) == 350, "SwrExchange times length does not equal 350"
@@ -251,15 +251,15 @@ def test_swr_binary_qaq(swr_test_path, ipos):
r = sobj.get_data(totim=time)
assert r is not None, "SwrExchange could not read data with get_data(tottim=)"
assert r.shape == (21,), "SwrExchange qaq data shape does not equal (21,)"
- assert (
- len(r.dtype.names) == 11
- ), "SwrExchange qaq data dtype does not have 11 entries"
+ assert len(r.dtype.names) == 11, (
+ "SwrExchange qaq data dtype does not have 11 entries"
+ )
ts = sobj.get_ts(irec=17, klay=0)
assert ts.shape == (350,), "SwrExchange timeseries shape does not equal (350,)"
- assert (
- len(ts.dtype.names) == 11
- ), "SwrExchange time series qaq data dtype does not have 11 entries"
+ assert len(ts.dtype.names) == 11, (
+ "SwrExchange time series qaq data dtype does not have 11 entries"
+ )
# plt.plot(ts['totim'], ts['qaq'])
# plt.show()
@@ -281,9 +281,9 @@ def test_swr_binary_structure(swr_test_path, ipos):
r = sobj.get_data(idx=idx)
assert r is not None, "SwrStructure could not read data with get_data(idx=)"
assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)"
- assert (
- len(r.dtype.names) == 8
- ), "SwrStructure structure data dtype does not have 8 entries"
+ assert len(r.dtype.names) == 8, (
+ "SwrStructure structure data dtype does not have 8 entries"
+ )
kswrkstpkper = sobj.get_kswrkstpkper()
assert kswrkstpkper.shape == (
@@ -293,13 +293,13 @@ def test_swr_binary_structure(swr_test_path, ipos):
for kkk in kswrkstpkper:
r = sobj.get_data(kswrkstpkper=kkk)
- assert (
- r is not None
- ), "SwrStructure could not read data with get_data(kswrkstpkper=)"
+ assert r is not None, (
+ "SwrStructure could not read data with get_data(kswrkstpkper=)"
+ )
assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)"
- assert (
- len(r.dtype.names) == 8
- ), "SwrStructure structure data dtype does not have 8 entries"
+ assert len(r.dtype.names) == 8, (
+ "SwrStructure structure data dtype does not have 8 entries"
+ )
times = sobj.get_times()
assert len(times) == 336, "SwrStructure times length does not equal 336"
@@ -308,15 +308,15 @@ def test_swr_binary_structure(swr_test_path, ipos):
r = sobj.get_data(totim=time)
assert r is not None, "SwrStructure could not read data with get_data(tottim=)"
assert r.shape == (2,), "SwrStructure structure data shape does not equal (2,)"
- assert (
- len(r.dtype.names) == 8
- ), "SwrStructure structure data dtype does not have 8 entries"
+ assert len(r.dtype.names) == 8, (
+ "SwrStructure structure data dtype does not have 8 entries"
+ )
ts = sobj.get_ts(irec=17, istr=0)
assert ts.shape == (336,), "SwrStructure timeseries shape does not equal (336,)"
- assert (
- len(ts.dtype.names) == 8
- ), "SwrStructure time series structure data dtype does not have 8 entries"
+ assert len(ts.dtype.names) == 8, (
+ "SwrStructure time series structure data dtype does not have 8 entries"
+ )
# plt.plot(ts['totim'], ts['strflow'])
# plt.show()
diff --git a/autotest/test_usg.py b/autotest/test_usg.py
index f9e03c6db..2b9b2178f 100644
--- a/autotest/test_usg.py
+++ b/autotest/test_usg.py
@@ -84,9 +84,9 @@ def test_usg_sms_load(function_tmpdir, mfusg_01A_nestedgrid_nognc_model_path):
for (key1, value1), (key2, value2) in zip(
sms2.__dict__.items(), sms.__dict__.items()
):
- assert (
- value1 == value2
- ), f"key1 {key1}, value 1 {value1} != key2 {key2} value 2 {value2}"
+ assert value1 == value2, (
+ f"key1 {key1}, value 1 {value1} != key2 {key2} value 2 {value2}"
+ )
@requires_exe("mfusg")
diff --git a/flopy/datbase.py b/flopy/datbase.py
index 2ae5d83a7..1ba24eb97 100644
--- a/flopy/datbase.py
+++ b/flopy/datbase.py
@@ -78,6 +78,5 @@ def to_array(self):

def masked_4D_arrays_itr(self):
raise NotImplementedError(
- "must define masked_4D_arrays_itr in child "
- "class to use this base class"
+ "must define masked_4D_arrays_itr in child class to use this base class"
)
diff --git a/flopy/export/netcdf.py b/flopy/export/netcdf.py
index e13092329..04e738ecc 100644
--- a/flopy/export/netcdf.py
+++ b/flopy/export/netcdf.py
@@ -348,10 +348,10 @@ def append(self, other, suffix="_1"):
attrs["long_name"] += " " + suffix
else:
continue
- assert (
- new_vname not in self.nc.variables.keys()
- ), "var already exists:{} in {}".format(
- new_vname, ",".join(self.nc.variables.keys())
+ assert new_vname not in self.nc.variables.keys(), (
+ "var already exists:{} in {}".format(
+ new_vname, ",".join(self.nc.variables.keys())
+ )
)
attrs["max"] = var[:].max()
attrs["min"] = var[:].min()
@@ -481,9 +481,9 @@ def difference(self, other, minuend="self", mask_zero_diff=True, onlydiff=True):
"""
- assert (
- self.nc is not None
- ), "can't call difference() if nc hasn't been populated"
+ assert self.nc is not None, (
+ "can't call difference() if nc hasn't been populated"
+ )
netCDF4 = import_optional_dependency("netCFD4")
@@ -1133,10 +1133,10 @@ def create_variable(
raise Exception(f"duplicate variable name: {name}")
self.log(f"creating variable: {name}")
- assert (
- precision_str in PRECISION_STRS
- ), "netcdf.create_variable() error: precision string {} not in {}".format(
- precision_str, PRECISION_STRS
+ assert precision_str in PRECISION_STRS, (
+ "netcdf.create_variable() error: precision string {} not in {}".format(
+ precision_str, PRECISION_STRS
+ )
)
if self.nc is None:
diff --git a/flopy/export/utils.py b/flopy/export/utils.py
index d6902d41e..a40af73b9 100644
--- a/flopy/export/utils.py
+++ b/flopy/export/utils.py
@@ -784,15 +784,14 @@ def generic_array_export(
"""
if (isinstance(f, str) or isinstance(f, Path)) and Path(f).suffix.lower() == ".nc":
assert "model" in kwargs.keys(), (
- "creating a new netCDF using generic_array_helper requires a "
- "'model' kwarg"
+ "creating a new netCDF using generic_array_helper requires a 'model' kwarg"
)
assert isinstance(kwargs["model"], BaseModel)
f = NetCdf(f, kwargs.pop("model"), **kwargs)
- assert array.ndim == len(
- dimensions
- ), "generic_array_helper() array.ndim != dimensions"
+ assert array.ndim == len(dimensions), (
+ "generic_array_helper() array.ndim != dimensions"
+ )
coords_dims = {
"time": "time",
"layer": "layer",
@@ -1135,9 +1134,9 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs):
"""
- assert isinstance(
- u3d, DataInterface
- ), "array3d_export only helps instances that support DataInterface"
+ assert isinstance(u3d, DataInterface), (
+ "array3d_export only helps instances that support DataInterface"
+ )
min_valid = kwargs.get("min_valid", -1.0e9)
max_valid = kwargs.get("max_valid", 1.0e9)
@@ -1291,9 +1290,9 @@ def array2d_export(f: Union[str, os.PathLike], u2d, fmt=None, verbose=False, **k
if fmt is set to 'vtk', parameters of Vtk initializer
"""
- assert isinstance(
- u2d, DataInterface
- ), "util2d_helper only helps instances that support DataInterface"
+ assert isinstance(u2d, DataInterface), (
+ "util2d_helper only helps instances that support DataInterface"
+ )
assert len(u2d.array.shape) == 2, "util2d_helper only supports 2D arrays"
min_valid = kwargs.get("min_valid", -1.0e9)
diff --git a/flopy/export/vtk.py b/flopy/export/vtk.py
index 7f15c2516..8416a4e4c 100644
--- a/flopy/export/vtk.py
+++ b/flopy/export/vtk.py
@@ -63,8 +63,7 @@ def add_timevalue(self, file, timevalue):
file = file.with_suffix(".vtu")
record = (
- f'&lt;DataSet timestep="{timevalue}" group="" '
- f'part="0" file="{file.name}"/&gt;\n'
+ f'&lt;DataSet timestep="{timevalue}" group="" part="0" file="{file.name}"/&gt;\n'
)
self.__data.append(record)
diff --git a/flopy/mfusg/mfusgdisu.py b/flopy/mfusg/mfusgdisu.py
index 617cfff98..75ff9cb61 100644
--- a/flopy/mfusg/mfusgdisu.py
+++ b/flopy/mfusg/mfusgdisu.py
@@ -532,8 +532,7 @@ def load(cls, f, model, ext_unit_dict=None, check=True):
# dataset 1
if model.verbose:
print(
- " loading NODES, NLAY, NJAG, IVSD, NPER, ITMUNI, LENUNI,"
- " IDSYMRD..."
+ " loading NODES, NLAY, NJAG, IVSD, NPER, ITMUNI, LENUNI, IDSYMRD..."
)
ll = line.strip().split()
nodes = int(ll.pop(0))
diff --git a/flopy/mfusg/mfusgsms.py b/flopy/mfusg/mfusgsms.py
index afd00d11c..179f1636f 100644
--- a/flopy/mfusg/mfusgsms.py
+++ b/flopy/mfusg/mfusgsms.py
@@ -530,8 +530,7 @@ def load(cls, f, model, ext_unit_dict=None):
if linmeth == 1 and nopt == 0:
if model.verbose:
print(
- " loading IACL NORDER LEVEL NORTH "
- "IREDSYS RRCTOL IDROPTOL EPSRN"
+ " loading IACL NORDER LEVEL NORTH IREDSYS RRCTOL IDROPTOL EPSRN"
)
while True:
line = f.readline()
diff --git a/flopy/modflow/mf.py b/flopy/modflow/mf.py
index dbf717ea3..eb47d3c18 100644
--- a/flopy/modflow/mf.py
+++ b/flopy/modflow/mf.py
@@ -140,9 +140,9 @@ def __init__(
# -- check if unstructured is specified for something
# other than mfusg is specified
if not self.structured:
- assert (
- "mfusg" in self.version
- ), "structured=False can only be specified for mfusg models"
+ assert "mfusg" in self.version, (
+ "structured=False can only be specified for mfusg models"
+ )
# external option stuff
self.array_free_format = True
diff --git a/flopy/modflow/mflak.py b/flopy/modflow/mflak.py
index e6bb4d0b7..5d2482a3e 100644
--- a/flopy/modflow/mflak.py
+++ b/flopy/modflow/mflak.py
@@ -315,8 +315,7 @@ def __init__(
for idx, fname in enumerate(tab_files, 1):
if fname is None:
raise ValueError(
- "a filename must be specified for the "
- f"tabfile for lake {idx}"
+ f"a filename must be specified for the tabfile for lake {idx}"
)
# set unit for tab files if not passed to __init__
if tab_units is None:
diff --git a/flopy/modflow/mfmnw1.py b/flopy/modflow/mfmnw1.py
index f16e90191..fe4f78f05 100644
--- a/flopy/modflow/mfmnw1.py
+++ b/flopy/modflow/mfmnw1.py
@@ -136,9 +136,9 @@ def __init__(
# -input format checks:
lossTypes = ["skin", "linear", "nonlinear"]
- assert (
- self.losstype.lower() in lossTypes
- ), f"LOSSTYPE ({self.losstype}) must be one of the following: {lossTypes}"
+ assert self.losstype.lower() in lossTypes, (
+ f"LOSSTYPE ({self.losstype}) must be one of the following: {lossTypes}"
+ )
self.parent.add_package(self)
@staticmethod
diff --git a/flopy/modflow/mfmnwi.py b/flopy/modflow/mfmnwi.py
index 1eb1ca8db..974efd6e5 100644
--- a/flopy/modflow/mfmnwi.py
+++ b/flopy/modflow/mfmnwi.py
@@ -323,12 +323,12 @@ def write_file(self):
line += f"{qhbflag:5d} "
if nitems == 5:
concflag = t[4]
- assert (
- 0 <= concflag <= 3
- ), "CONCflag must be an integer between 0 and 3."
- assert isinstance(
- concflag, int
- ), "CONCflag must be an integer between 0 and 3."
+ assert 0 <= concflag <= 3, (
+ "CONCflag must be an integer between 0 and 3."
+ )
+ assert isinstance(concflag, int), (
+ "CONCflag must be an integer between 0 and 3."
+ )
line += f"{concflag:5d} "
line += "\n"
f.write(line)
diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py
index c3f6e546d..1abd2ce50 100644
--- a/flopy/modflow/mfsfr2.py
+++ b/flopy/modflow/mfsfr2.py
@@ -1622,9 +1622,9 @@ def _write_1c(self, f_sfr):
def _write_reach_data(self, f_sfr):
# Write the recarray (data) to the file (or file handle) f
- assert isinstance(
- self.reach_data, np.recarray
- ), "MfList.__tofile() data arg not a recarray"
+ assert isinstance(self.reach_data, np.recarray), (
+ "MfList.__tofile() data arg not a recarray"
+ )
# decide which columns to write
columns = _get_item2_names(
diff --git a/flopy/modpath/mp6.py b/flopy/modpath/mp6.py
index 33f70d193..ffa0a70dd 100644
--- a/flopy/modpath/mp6.py
+++ b/flopy/modpath/mp6.py
@@ -147,8 +147,7 @@ def __init__(
)
if self.dis_file is None:
raise ValueError(
- "the dis file in the MODFLOW model or passed "
- "to __init__ cannot be None"
+ "the dis file in the MODFLOW model or passed to __init__ cannot be None"
)
if self.__mf is None:
diff --git a/flopy/modpath/mp7.py b/flopy/modpath/mp7.py
index 67fda5a1a..e27e67a80 100644
--- a/flopy/modpath/mp7.py
+++ b/flopy/modpath/mp7.py
@@ -209,8 +209,7 @@ def __init__(
shape = (nlay, nrow, ncol)
if dis is None:
raise Exception(
- "DIS, or DISU packages must be "
- "included in the passed MODFLOW model"
+ "DIS, or DISU packages must be included in the passed MODFLOW model"
)
elif dis is not None and shape is None:
nlay, nodes = dis.nlay, dis.nodes
@@ -303,8 +302,7 @@ def __init__(
)
if self.dis_file is None and self.grbdis_file is None:
raise ValueError(
- "the dis file in the MODFLOW model or passed "
- "to __init__ cannot be None"
+ "the dis file in the MODFLOW model or passed to __init__ cannot be None"
)
# set ib and ibound
diff --git a/flopy/modpath/mp7particlegroup.py b/flopy/modpath/mp7particlegroup.py
index 37df44646..e5edd36a9 100644
--- a/flopy/modpath/mp7particlegroup.py
+++ b/flopy/modpath/mp7particlegroup.py
@@ -51,8 +51,7 @@ def __init__(self, particlegroupname, filename, releasedata):
if releasedata is None:
raise ValueError(
- "releasedata must be provided to instantiate "
- "a MODPATH 7 particle group"
+ "releasedata must be provided to instantiate a MODPATH 7 particle group"
)
# convert releasedata to a list, if required
diff --git a/flopy/modpath/mp7sim.py b/flopy/modpath/mp7sim.py
index 3df35c139..b8bf3f592 100644
--- a/flopy/modpath/mp7sim.py
+++ b/flopy/modpath/mp7sim.py
@@ -470,8 +470,9 @@ def __init__(
else:
if len(timepointdata) != 2:
raise ValueError(
- "timepointdata must be a have 2 entries "
- "({} provided)".format(len(timepointdata))
+ "timepointdata must be a have 2 entries ({} provided)".format(
+ len(timepointdata)
+ )
)
else:
if isinstance(timepointdata[1], (list, tuple)):
diff --git a/flopy/mt3d/mtsft.py b/flopy/mt3d/mtsft.py
index 2bebd4aa8..76abac30f 100644
--- a/flopy/mt3d/mtsft.py
+++ b/flopy/mt3d/mtsft.py
@@ -553,21 +553,18 @@ def load(cls, f, model, nsfinit=None, nper=None, ncomp=None, ext_unit_dict=None)
print(f" IETSFR {ietsfr}")
if ietsfr == 0:
print(
- " Mass does not exit the model via simulated "
- "stream evaporation "
+ " Mass does not exit the model via simulated stream evaporation "
)
else:
print(
- " Mass exits the stream network via simulated "
- "stream evaporation "
+ " Mass exits the stream network via simulated stream evaporation "
)
# Item 2 (ISFSOLV, WIMP, WUPS, CCLOSESF, MXITERSF, CRNTSF, IPRTXMD)
line = f.readline()
if model.verbose:
print(
- " loading isfsolv, wimp, wups, cclosesf, mxitersf, "
- "crntsf, iprtxmd..."
+ " loading isfsolv, wimp, wups, cclosesf, mxitersf, crntsf, iprtxmd..."
)
vals = line.strip().split()
diff --git a/flopy/pakbase.py b/flopy/pakbase.py
index 127596c93..a279f3bd1 100644
--- a/flopy/pakbase.py
+++ b/flopy/pakbase.py
@@ -744,8 +744,7 @@ def _confined_layer_check(self, chk):
continue
if confined and l > 0:
desc = (
- "\r LAYTYP: unconfined (convertible) "
- "layer below confined layer"
+ "\r LAYTYP: unconfined (convertible) layer below confined layer"
)
chk._add_to_summary(type="Warning", desc=desc)
@@ -949,7 +948,7 @@ def load(
if toption.lower() == "autoflowreduce":
options.append(toption.lower())
elif toption.lower() == "iunitafr":
- options.append(f"{toption.lower()} {t[it+1]}")
+ options.append(f"{toption.lower()} {t[it + 1]}")
it += 1
it += 1
diff --git a/flopy/pest/tplarray.py b/flopy/pest/tplarray.py
index 09609f4e7..771d59613 100644
--- a/flopy/pest/tplarray.py
+++ b/flopy/pest/tplarray.py
@@ -122,8 +122,7 @@ def add_parameter(self, p):
if "layers" in p.span and "idx" in p.span:
if p.span["idx"] is not None:
raise Exception(
- "For a Util3d object, cannot have layers and "
- "idx in parameter.span"
+ "For a Util3d object, cannot have layers and idx in parameter.span"
)
if "layers" in p.span:
diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py
index 7771bc59d..8c1bfec41 100644
--- a/flopy/plot/plotutil.py
+++ b/flopy/plot/plotutil.py
@@ -959,7 +959,7 @@ def _plot_transient2d_helper(
name = transient2d.name.replace("_", "").upper()
axes = []
for idx, kper in enumerate(range(k0, k1)):
- title = f"{name} stress period {kper + 1 :d}"
+ title = f"{name} stress period {kper + 1:d}"
if filename_base is not None:
filename = f"{filename_base}_{name}_{kper + 1:05d}.{fext}"
diff --git a/flopy/utils/gridgen.py b/flopy/utils/gridgen.py
index badd18155..12913a609 100644
--- a/flopy/utils/gridgen.py
+++ b/flopy/utils/gridgen.py
@@ -384,9 +384,9 @@ def add_active_domain(self, feature, layers):
)
# make sure shapefile exists
- assert (
- shapefile_path and shapefile_path.is_file()
- ), f"Shapefile does not exist: {shapefile_path}"
+ assert shapefile_path and shapefile_path.is_file(), (
+ f"Shapefile does not exist: {shapefile_path}"
+ )
# store shapefile info
self._addict[shapefile_path.stem] = relpath_safe(shapefile_path, self.model_ws)
@@ -436,9 +436,9 @@ def add_refinement_features(self, features, featuretype, level, layers):
)
# make sure shapefile exists
- assert (
- shapefile_path and shapefile_path.is_file()
- ), f"Shapefile does not exist: {shapefile_path}"
+ assert shapefile_path and shapefile_path.is_file(), (
+ f"Shapefile does not exist: {shapefile_path}"
+ )
# store shapefile info
self._rfdict[shapefile_path.stem] = [
diff --git a/flopy/utils/lgrutil.py b/flopy/utils/lgrutil.py
index b40736eae..9cd927335 100644
--- a/flopy/utils/lgrutil.py
+++ b/flopy/utils/lgrutil.py
@@ -957,9 +957,9 @@ def get_disv_gridprops(self):
"""
# check
- assert (
- self.lgr.ncppl.min() == self.lgr.ncppl.max()
- ), "Exporting disv grid properties requires ncppl to be 1."
+ assert self.lgr.ncppl.min() == self.lgr.ncppl.max(), (
+ "Exporting disv grid properties requires ncppl to be 1."
+ )
assert self.lgr.nlayp == self.lgr.nlay, (
"Exporting disv grid properties requires parent and child models "
"to have the same number of layers."
diff --git a/flopy/utils/mflistfile.py b/flopy/utils/mflistfile.py
index f15fee32c..bf1ac8ea3 100644
--- a/flopy/utils/mflistfile.py
+++ b/flopy/utils/mflistfile.py
@@ -557,8 +557,7 @@ def get_reduced_pumping(self):
# Check if reduced pumping data was set to be written
# to list file
check_str = (
- "WELLS WITH REDUCED PUMPING WILL BE REPORTED "
- "TO THE MAIN LISTING FILE"
+ "WELLS WITH REDUCED PUMPING WILL BE REPORTED TO THE MAIN LISTING FILE"
)
check_str_ag = "AG WELLS WITH REDUCED PUMPING FOR STRESS PERIOD"
diff --git a/flopy/utils/postprocessing.py b/flopy/utils/postprocessing.py
index 3e7728357..02f5f0991 100644
--- a/flopy/utils/postprocessing.py
+++ b/flopy/utils/postprocessing.py
@@ -611,8 +611,7 @@ def get_specific_discharge(
else:
raise IndexError(
- "Classical budget components must have "
- "the same shape as the modelgrid"
+ "Classical budget components must have the same shape as the modelgrid"
)
else:
spdis = vectors
diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py
index 41c0e4410..b83025a90 100644
--- a/flopy/utils/util_array.py
+++ b/flopy/utils/util_array.py
@@ -74,9 +74,9 @@ class ArrayFormat:
"""
def __init__(self, u2d, python=None, fortran=None, array_free_format=None):
- assert isinstance(
- u2d, Util2d
- ), f"ArrayFormat only supports Util2d, not {type(u2d)}"
+ assert isinstance(u2d, Util2d), (
+ f"ArrayFormat only supports Util2d, not {type(u2d)}"
+ )
if len(u2d.shape) == 1:
self._npl_full = u2d.shape[0]
else:
@@ -105,8 +105,7 @@ def __init__(self, u2d, python=None, fortran=None, array_free_format=None):
if python is not None and fortran is not None:
raise Exception(
- "only one of [python,fortran] can be passed "
- "to ArrayFormat constructor"
+ "only one of [python,fortran] can be passed to ArrayFormat constructor"
)
if python is not None:
@@ -136,8 +135,7 @@ def _set_defaults(self):
self._decimal = self.default_float_decimal
else:
raise Exception(
- "ArrayFormat._set_defaults() error: "
- f"unsupported dtype: {self.dtype!s}"
+ f"ArrayFormat._set_defaults() error: unsupported dtype: {self.dtype!s}"
)
def __str__(self):
@@ -786,10 +784,10 @@ def build_2d_instances(self):
if isinstance(self.__value, list) or (
isinstance(self.__value, np.ndarray) and (self.__value.ndim == 1)
):
- assert (
- len(self.__value) == self.shape[0]
- ), "length of 3d enumerable: {} != to shape[0]: {}".format(
- len(self.__value), self.shape[0]
+ assert len(self.__value) == self.shape[0], (
+ "length of 3d enumerable: {} != to shape[0]: {}".format(
+ len(self.__value), self.shape[0]
+ )
)
for i, item in enumerate(self.__value):
@@ -1090,14 +1088,12 @@ def __setitem__(self, key, value):
key = int(key)
except Exception as e:
raise Exception(
- "Transient3d.__setitem__() error: "
- f"'key'could not be cast to int:{e!s}"
+ f"Transient3d.__setitem__() error: 'key'could not be cast to int:{e!s}"
)
nper = self._model.nper
if key > self._model.nper or key < 0:
raise Exception(
- "Transient3d.__setitem__() error: "
- f"key {key} not in nper range 0:{nper}"
+ f"Transient3d.__setitem__() error: key {key} not in nper range 0:{nper}"
)
self.transient_3ds[key] = self.__get_3d_instance(key, value)
@@ -1537,14 +1533,12 @@ def __setitem__(self, key, value):
key = int(key)
except Exception as e:
raise Exception(
- "Transient2d.__setitem__() error: "
- f"'key'could not be cast to int:{e!s}"
+ f"Transient2d.__setitem__() error: 'key'could not be cast to int:{e!s}"
)
nper = self._model.nper
if key > self._model.nper or key < 0:
raise Exception(
- "Transient2d.__setitem__() error: "
- f"key {key} not in nper range 0:{nper}"
+ f"Transient2d.__setitem__() error: key {key} not in nper range 0:{nper}"
)
self.transient_2ds[key] = self.__get_2d_instance(key, value)
@@ -2247,9 +2241,9 @@ def get_file_entry(self, how=None):
else:
how = "external"
if how == "internal":
- assert (
- not self.format.binary
- ), "Util2d error: 'how' is internal, but format is binary"
+ assert not self.format.binary, (
+ "Util2d error: 'how' is internal, but format is binary"
+ )
cr = self.get_internal_cr()
return cr + self.string
@@ -2305,9 +2299,9 @@ def get_file_entry(self, how=None):
elif how == "constant":
if self.vtype not in [np.int32, np.float32]:
u = np.unique(self._array)
- assert (
- u.shape[0] == 1
- ), "Util2d error: 'how' is constant, but array is not uniform"
+ assert u.shape[0] == 1, (
+ "Util2d error: 'how' is constant, but array is not uniform"
+ )
value = u[0]
else:
value = self.__value
@@ -2788,9 +2782,9 @@ def load(
fname = fname.replace('"', "")
fname = fname.replace("\\", os.path.sep)
fname = os.path.join(model.model_ws, fname)
- assert os.path.exists(
- fname
- ), f"Util2d.load() error: open/close file {fname} not found"
+ assert os.path.exists(fname), (
+ f"Util2d.load() error: open/close file {fname} not found"
+ )
if "binary" not in cr_dict["fmtin"].lower():
f = open(fname, "r")
data = Util2d.load_txt(
diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py
index 8593e5e79..3eeac01a8 100644
--- a/flopy/utils/util_list.py
+++ b/flopy/utils/util_list.py
@@ -347,8 +347,7 @@ def __cast_data(self, data):
self.__vtype[kper] = None
else:
raise ValueError(
- "MfList error: unsupported data type: "
- f"{type(d)} at kper {kper}"
+ f"MfList error: unsupported data type: {type(d)} at kper {kper}"
)
# A single dataframe
@@ -623,9 +622,9 @@ def write_transient(
# external arrays are not supported (oh hello MNW1!)
# write the transient sequence described by the data dict
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
- assert hasattr(
- f, "read"
- ), "MfList.write() error: f argument must be a file handle"
+ assert hasattr(f, "read"), (
+ "MfList.write() error: f argument must be a file handle"
+ )
kpers = list(self.data.keys())
pak_name_str = self.package.__class__.__name__.lower()
if (len(kpers) == 0) and (pak_name_str == "mfusgwel"): # must be cln wels
@@ -726,9 +725,9 @@ def write_transient(
def __tofile(self, f, data):
# Write the recarray (data) to the file (or file handle) f
- assert isinstance(
- data, np.recarray
- ), "MfList.__tofile() data arg not a recarray"
+ assert isinstance(data, np.recarray), (
+ "MfList.__tofile() data arg not a recarray"
+ )
# Add one to the kij indices
lnames = [name.lower() for name in self.dtype.names]