diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..987f1d1 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,167 @@ +name: CI +on: + push: + branches: + - main + - develop* + paths-ignore: + - '**.md' + pull_request: + branches: + - main + - develop* + paths-ignore: + - '**.md' +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: 3.7 + cache: 'pip' + cache-dependency-path: setup.cfg + + - name: Install Python packages + run: | + pip install . + pip install ".[lint]" + + - name: Run isort + run: isort --verbose --check --diff modflow_devtools + + - name: Run black + run: black --check --diff modflow_devtools + + - name: Run flake8 + run: flake8 --count --show-source --exit-zero modflow_devtools + + - name: Run pylint + run: pylint --jobs=0 --errors-only --exit-zero modflow_devtools + + build: + name: Build + runs-on: ubuntu-latest + steps: + + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: 3.7 + + - name: Upgrade pip and install build and twine + run: | + pip install --upgrade pip + pip install build twine + + - name: Base modflow_devtools installation + run: | + pip --verbose install . + + - name: Print package version + run: | + python -c "import modflow_devtools; print(modflow_devtools.__version__)" + + - name: Build package + run: | + python -m build + + - name: Check distribution + run: | + twine check --strict dist/* + + test: + name: Test + needs: + - build + - lint + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ ubuntu-latest, macos-latest, windows-latest ] + python: [ 3.7, 3.8, 3.9, "3.10" ] + steps: + + - name: Checkout repo + uses: actions/checkout@v3 + with: + path: modflow-devtools + + - name: Checkout modflow6 + uses: actions/checkout@v3 + with: + repository: MODFLOW-USGS/modflow6 + path: modflow6 + + - name: Checkout modflow6 examples + uses: actions/checkout@v3 + with: + repository: MODFLOW-USGS/modflow6-examples + path: modflow6-examples + + - name: Checkout modflow6 test models + uses: actions/checkout@v3 + with: + repository: MODFLOW-USGS/modflow6-testmodels + path: modflow6-testmodels + + - name: Checkout modflow6 large test models + uses: actions/checkout@v3 + with: + repository: MODFLOW-USGS/modflow6-largetestmodels + path: modflow6-largetestmodels + + - name: Install executables + uses: modflowpy/install-modflow-action@v1 + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python }} + cache: 'pip' + cache-dependency-path: | + modflow-devtools/setup.cfg + modflow6-examples/etc/requirements*.txt + + - name: Install Python packages + working-directory: modflow-devtools + run: | + pip install . 
+ pip install ".[test]" + + - name: Cache modflow6 examples + id: cache-examples + uses: actions/cache@v3 + with: + path: modflow6-examples/examples + key: modflow6-examples-${{ hashFiles('modflow6-examples/data/**') }} + + - name: Install extra Python packages + if: steps.cache-examples.outputs.cache-hit != 'true' + working-directory: modflow6-examples/etc + run: | + pip install -r requirements.pip.txt + pip install -r requirements.usgs.txt + + - name: Build modflow6 example models + if: steps.cache-examples.outputs.cache-hit != 'true' + working-directory: modflow6-examples/etc + run: python ci_build_files.py + + - name: Run tests + working-directory: modflow-devtools + env: + BIN_PATH: ~/.local/bin/modflow + REPOS_PATH: ${{ github.workspace }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: pytest -v -n auto --durations 0 \ No newline at end of file diff --git a/.github/workflows/modflow-devtools-linting-install.yml b/.github/workflows/modflow-devtools-linting-install.yml deleted file mode 100644 index deb76db..0000000 --- a/.github/workflows/modflow-devtools-linting-install.yml +++ /dev/null @@ -1,94 +0,0 @@ -name: modflow_devtools linting/installation - -on: - schedule: - - cron: '0 3 * * 3' # run at 3 AM UTC every Wednesday - push: - branches: - - main - - develop - pull_request: - branches: - - develop -jobs: - - - modflow-devtools_lint: - name: modflow_devtools linting - runs-on: ubuntu-latest - - steps: - - name: Checkout repo - uses: actions/checkout@v2.3.4 - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: 3.9 - - - name: Install packages - run: | - pip install numpy flopy pylint flake8 black requests - - - name: Run isort - run: | - echo "if isort check fails update isort using" - echo " pip install isort --upgrade" - echo "and run" - echo " isort ./modflow_devtools" - echo "and then commit the changes." - isort --verbose --check --diff ./modflow_devtools - - - name: Run black - run: | - echo "if black check fails update black using" - echo " pip install black --upgrade" - echo "and run" - echo " black ./modflow_devtools" - echo "and then commit the changes." - black --check --diff ./modflow_devtools - - - name: Run flake8 - run: flake8 --count --show-source ./modflow_devtools - - - name: Run pylint - run: pylint --jobs=0 --errors-only ./modflow_devtools - - modflow-devtools_setup: - name: standard installation - runs-on: ubuntu-latest - strategy: - fail-fast: false - defaults: - run: - shell: bash - - steps: - - # check out repo - - name: Checkout repo - uses: actions/checkout@v2.3.4 - - - name: Setup Python - uses: actions/setup-python@v2.2.2 - with: - python-version: 3.9 - - - name: Upgrade pip and install build and twine - run: | - python -m pip install --upgrade pip - pip install build twine - - - name: Base modflow_devtools installation - run: | - pip --verbose install . 
- - - name: Print modflow_devtools version - run: | - python -c "import modflow_devtools; print(modflow_devtools.__version__)" - - - name: Build modflow_devtools, check dist outputs - run: | - python -m build - twine check --strict dist/* - diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..1b0ce22 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,52 @@ +name: Publish release and package +on: + push: + branches: + - master +jobs: + release: + name: Release + runs-on: ubuntu-latest + steps: + + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: 3.7 + + - name: Upgrade pip and install build and twine + run: | + pip install --upgrade pip + pip install build twine + + - name: Base modflow_devtools installation + run: | + pip --verbose install . + + - name: Print package version + run: | + python -c "import modflow_devtools; print(modflow_devtools.__version__)" + + - name: Build package + run: | + python -m build + + - name: Check distribution + run: | + twine check --strict dist/* + + - name: Create release + uses: "marvinpinto/action-automatic-releases@latest" + with: + repo_token: "${{ secrets.GITHUB_TOKEN }}" + + # todo: set repo secrets and enable + # - name: Publish package + # env: + # TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + # TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + # run: | + # twine upload dist/* \ No newline at end of file diff --git a/DEVELOPER.md b/DEVELOPER.md new file mode 100644 index 0000000..a329978 --- /dev/null +++ b/DEVELOPER.md @@ -0,0 +1,64 @@ +# Developing `modflow-devtools` + +This document provides guidance to set up a development environment and discusses conventions used in this project. + + + + +- [Installation](#installation) +- [Testing](#testing) + - [Environment variables](#environment-variables) + - [Running the tests](#running-the-tests) + - [Writing new tests](#writing-new-tests) + - [Temporary directories](#temporary-directories) + + + +## Installation + +To get started, first fork and clone this repository. Then install the project and core packages as well as linting and testing dependencies: + +```shell +pip install . +pip install ".[lint, test]" +``` + +## Testing + +This repository's tests use [`pytest`](https://docs.pytest.org/en/latest/) and several plugins. + +### Environment variables + +This repository's tests expect a few environment variables: + +- `BIN_PATH`: path to MODFLOW 6 and related executables +- `REPOS_PATH`: the path to MODFLOW 6 example model repositories +- `GITHUB_TOKEN`: a GitHub authentication token + +These may be set manually, but the recommended approach is to configure environment variables in a `.env` file in the project root, for instance: + +``` +BIN_PATH=/path/to/modflow/executables +REPOS_PATH=/path/to/repos +GITHUB_TOKEN=yourtoken... +``` + +The tests use [`pytest-dotenv`](https://github.com/quiqua/pytest-dotenv) to detect and load variables from this file. + +**Note:** at minimum, the tests require that the `mf6` executable is present in `BIN_PATH`. + +### Running the tests + +Tests should be run from the project root. To run the tests in parallel with verbose output: + +```shell +pytest -v -n auto +``` + +### Writing new tests + +Tests should follow a few conventions for ease of use and maintenance. 
+ +#### Temporary directories + +Tests which must write to disk should use `pytest`'s built-in `tmp_path` fixture or one of this package's own scoped temporary directory fixtures. diff --git a/MANIFEST.in b/MANIFEST.in index 65a3425..9d616eb 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,3 @@ global-exclude .DS_Store *.pyc *.pyo *.pyd *.swp *.bak *~ .* *.sh *.yml *.md *.toml exclude autotest/* include pyproject.toml -include modflow_devtools/utilities/usgsprograms.txt diff --git a/README.md b/README.md index 254f8df..655fcf4 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,316 @@ -# modflow-devtools -python tools for MODFLOW development +# MODFLOW developer tools + +[![Project Status: WIP – Initial development is in progress, but there has not yet been a stable, usable release suitable for the public.](https://www.repostatus.org/badges/latest/wip.svg)](https://www.repostatus.org/#wip) +[![CI](https://github.com/MODFLOW-USGS/modflow-devtools/actions/workflows/ci.yml/badge.svg)](https://github.com/MODFLOW-USGS/modflow-devtools/actions/workflows/ci.yml) + +Python tools for MODFLOW development and testing. + + + + +- [Requirements](#requirements) +- [Installation](#installation) +- [Included](#included) + - [`MFZipFile` class](#mfzipfile-class) + - [Keepable temporary directory fixtures](#keepable-temporary-directory-fixtures) + - [Model-loading fixtures](#model-loading-fixtures) + - [Test models](#test-models) + - [Example scenarios](#example-scenarios) + - [Reusable test case framework](#reusable-test-case-framework) + - [Executables container](#executables-container) + - [Conditionally skipping tests](#conditionally-skipping-tests) + - [Miscellaneous](#miscellaneous) + - [Generating TOCs with `doctoc`](#generating-tocs-with-doctoc) + - [Testing CI workflows with `act`](#testing-ci-workflows-with-act) +- [MODFLOW Resources](#modflow-resources) + + + +## Requirements + +This package requires Python 3.7+. Its only dependencies are `numpy` and `pytest`. + +## Installation + +This package is not yet published to PyPI or a Conda channel. To install it, please see the [developer documentation](DEVELOPER.md). + +## Included + +This package contains shared tools for developing and testing MODFLOW 6 and FloPy, including standalone utilities as well as `pytest` fixtures, CLI options, and test parametrizations: + +- a `ZipFile` subclass preserving file attributes +- variably-scoped `pytest` temporary directory fixtures +- a `pytest` smoke test CLI option (to run a fast subset of cases) +- a minimal `pytest` framework for reusing test functions and data +- a `pytest_generate_tests` hook to load example/test model fixtures +- a set of `pytest` markers to conditionally skip test cases based on + - operating system + - Python packages installed + - executables available on the path + +To import `pytest` fixtures in a project consuming `modflow-devtools`, add the following to a `conftest.py` file in the project root: + +```python +pytest_plugins = ["modflow_devtools.fixtures"] +``` + +Note that `pytest` requires this to be a top-level `conftest.py` living in your project root. Nested `conftest.py` files may override or extend this package's behavior. + +### `MFZipFile` class + +Python's `ZipFile` doesn't preserve execute permissions. The `MFZipFile` subclass modifies `ZipFile.extract()` to do so, as per the recommendation [here](https://stackoverflow.com/questions/39296101/python-zipfile-removes-execute-permissions-from-binaries).
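+ +A minimal usage sketch (the archive name `executables.zip` and destination directory `bin` are hypothetical): + +```python +from modflow_devtools.download import MFZipFile + +# extract an archive of binaries without losing execute permissions +with MFZipFile("executables.zip") as zf: + for name in zf.namelist(): + # the overridden extract() restores each member's mode bits + zf.extract(name, path="bin") +```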
+ +### Keepable temporary directory fixtures + +Tests often need to exercise code that reads from and/or writes to disk. The test harness may also need to create test data during setup and clean up the filesystem on teardown. Temporary directories are built into `pytest` via the [`tmp_path`](https://docs.pytest.org/en/latest/how-to/tmp_path.html#the-tmp-path-fixture) and `tmp_path_factory` fixtures. + +Several fixtures are provided in `modflow_devtools/fixtures.py` to extend the behavior of temporary directories for test functions: + +- `function_tmpdir` +- `module_tmpdir` +- `class_tmpdir` +- `session_tmpdir` + +These are automatically created before test code runs and lazily removed afterwards, subject to the same [cleanup procedure](https://docs.pytest.org/en/latest/how-to/tmp_path.html#the-default-base-temporary-directory) used by the default `pytest` temporary directory fixtures. Their purpose is to allow test artifacts to be saved in a user-specified location when `pytest` is invoked with a `--keep` option; this can be useful to debug failing tests. + +```python +from pathlib import Path +import inspect + +def test_tmpdirs(function_tmpdir, module_tmpdir): + # function-scoped temporary directory + assert function_tmpdir.is_dir() + assert inspect.currentframe().f_code.co_name in function_tmpdir.stem + + # module-scoped temp dir (accessible to other tests in the script) + assert module_tmpdir.is_dir() + + with open(function_tmpdir / "test.txt", "w") as f1, open(module_tmpdir / "test.txt", "w") as f2: + f1.write("hello, function") + f2.write("hello, module") +``` + +Any files written to the temporary directory will be saved to subdirectories named according to the test case, class or module. To keep files created by a test case like the one above, run: + +```shell +pytest --keep <path> +``` + +There is also a `--keep-failed <path>` option which preserves outputs only from failing test cases.
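+ +For example, to stash outputs of failing tests in a (hypothetical) `.failed` directory: + +```shell +pytest --keep-failed .failed +```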
+ +### Model-loading fixtures + +Fixtures are provided to load models from the MODFLOW 6 example and test model repositories and feed them to test functions. Models can be loaded from: + +- [`MODFLOW-USGS/modflow6-examples`](https://github.com/MODFLOW-USGS/modflow6-examples) +- [`MODFLOW-USGS/modflow6-testmodels`](https://github.com/MODFLOW-USGS/modflow6-testmodels) +- [`MODFLOW-USGS/modflow6-largetestmodels`](https://github.com/MODFLOW-USGS/modflow6-largetestmodels) + +These models can be requested like any other `pytest` fixture, by adding one of the following parameters to test functions: + +- `test_model_mf5to6` +- `test_model_mf6` +- `large_test_model` +- `example_scenario` + +To use these fixtures, the environment variable `REPOS_PATH` must point to the location of model repositories on the filesystem. Model repositories must live side-by-side in this location. If `REPOS_PATH` is not configured, test functions requesting these fixtures will be skipped. + +**Note**: example models must be built by running the `ci_build_files.py` script in `modflow6-examples/etc` before running tests using the `example_scenario` fixture. + +#### Test models + +The `test_model_mf5to6`, `test_model_mf6` and `large_test_model` fixtures are each a `Path` to the directory containing the model's namefile. For instance, to load `mf5to6` models from the [`MODFLOW-USGS/modflow6-testmodels`](https://github.com/MODFLOW-USGS/modflow6-testmodels) repository: + +```python +def test_mf5to6_model(tmpdir, test_model_mf5to6): + assert test_model_mf5to6.is_dir() +``` + +This test function will be parametrized with all `mf5to6` models found in the `modflow6-testmodels` repository (likewise for `mf6` models, and for large test models in their own repository). + +#### Example scenarios + +The [`MODFLOW-USGS/modflow6-examples`](https://github.com/MODFLOW-USGS/modflow6-examples) repository contains a collection of scenarios, each consisting of one or more models. The `example_scenario` fixture is a `Tuple[str, List[Path]]`. The first item is the name of the scenario. The second item is a list of namefile `Path`s, ordered alphabetically by name. Model naming conventions are as follows: + +- groundwater flow models begin with prefix `gwf*` +- transport models begin with `gwt*` + +Ordering as above permits models to be run directly in the order provided, with transport models potentially consuming the outputs of flow models. A straightforward pattern is to loop over models and run each in a subdirectory of the same top-level working directory. + +```python +def test_example_scenario(tmpdir, example_scenario): + name, namefiles = example_scenario + for namefile in namefiles: + model_ws = tmpdir / namefile.parent.name + model_ws.mkdir() + # load and run model + # ... +``` + +### Reusable test case framework + +A second approach to testing, more flexible than loading pre-existing models from a repository, is to construct test models in code. This typically involves defining variables or `pytest` fixtures in the same test script as the test function. While this pattern is effective for manually defined scenarios, it tightly couples test functions to test cases, prevents easy reuse of the test case by other tests, and tends to lead to duplication, as each test script may reproduce similar test functions and data-generation procedures. + +This package provides a minimal framework for self-describing test cases which can be defined once and plugged into arbitrary test functions. At its core is the `Case` class, which is just a `SimpleNamespace` with a few defaults and a `copy_update()` method for easy modification. This pairs nicely with [`pytest-cases`](https://smarie.github.io/python-pytest-cases/), which is recommended but not required. + +A `Case` requires only a `name`, and has a single default attribute, `xfail=False`, indicating whether the test case is expected to succeed. (Test functions may of course choose to use or ignore this.) + +For instance, to generate a set of similar test cases with `pytest-cases`: + +```python +from pytest_cases import parametrize + +from modflow_devtools.case import Case + +template = Case(name="QA") +cases = [ + template.copy_update(name=template.name + "1", question="What's the meaning of life, the universe, and everything?", answer=42), + template.copy_update(name=template.name + "2", question="Is a Case immutable?", answer="No, but it's better not to mutate it.") +] + +@parametrize(case=cases, ids=[c.name for c in cases]) +def case_qa(case): + print(case.name, case.question, case.answer) +``` + +### Executables container + +The `Executables` class is just a mapping between executable names and paths on the filesystem.
This can be useful to test multiple versions of the same program, and is easily injected into test functions as a fixture: + +```python +from os import environ +from pathlib import Path +import subprocess +import sys + +import pytest + +from modflow_devtools.misc import get_suffixes +from modflow_devtools.executables import Executables + +_bin_path = Path("~/.local/bin/modflow").expanduser() +_dev_path = Path(environ.get("BIN_PATH", "")).absolute() +_ext, _ = get_suffixes(sys.platform) + +@pytest.fixture +def exes(): + if not (_bin_path.is_dir() and _dev_path.is_dir()): + pytest.skip("executable directories not found") + return Executables( + mf6_rel=_bin_path / f"mf6{_ext}", + mf6_dev=_dev_path / f"mf6{_ext}" + ) + +def test_exes(exes): + print(subprocess.check_output([f"{exes.mf6_rel}", "-v"]).decode('utf-8')) + print(subprocess.check_output([f"{exes.mf6_dev}", "-v"]).decode('utf-8')) +``` + +### Conditionally skipping tests + +Several `pytest` markers are provided to conditionally skip tests based on executable availability, Python package environment or operating system. + +To skip tests if one or more executables are not available on the path: + +```python +from shutil import which +from modflow_devtools.markers import requires_exe + +@requires_exe("mf6") +def test_mf6(): + assert which("mf6") + +@requires_exe("mf6", "mp7") +def test_mf6_and_mp7(): + assert which("mf6") + assert which("mp7") +``` + +To skip tests if one or more Python packages are not available: + +```python +from modflow_devtools.markers import requires_pkg + +@requires_pkg("pandas") +def test_needs_pandas(): + import pandas as pd + +@requires_pkg("pandas", "shapefile") +def test_needs_pandas_and_pyshp(): + import pandas as pd + from shapefile import Reader +``` + +To mark tests requiring or incompatible with particular operating systems: + +```python +import os +import platform +from modflow_devtools.markers import requires_platform, excludes_platform + +@requires_platform("Windows") +def test_needs_windows(): + assert platform.system() == "Windows" + +@excludes_platform("Darwin", ci_only=True) +def test_breaks_osx_ci(): + if "CI" in os.environ: + assert platform.system() != "Darwin" +``` + +Platforms must be specified as returned by `platform.system()`. + +Both of these markers accept a `ci_only` flag, which indicates whether the policy should only apply when the test is running on GitHub Actions CI. + +Markers are also provided to ping network resources and skip if unavailable: + +- `@requires_github`: skips if `github.com` is unreachable +- `@requires_spatial_reference`: skips if `spatialreference.org` is unreachable
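+ +For instance (a minimal sketch; the test body is illustrative): + +```python +from modflow_devtools.markers import requires_github + +@requires_github +def test_needs_github(): + # skipped if github.com is unreachable + ... +```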
+ +### Miscellaneous + +A few other useful tools for MODFLOW 6 and FloPy development include: + +- [`doctoc`](https://www.npmjs.com/package/doctoc): automatically generate table of contents sections for markdown files +- [`act`](https://github.com/nektos/act): test GitHub Actions workflows locally (requires Docker) + +#### Generating TOCs with `doctoc` + +The [`doctoc`](https://www.npmjs.com/package/doctoc) tool can be used to automatically generate table of contents sections for markdown files. `doctoc` is distributed with the [Node Package Manager](https://docs.npmjs.com/cli/v7/configuring-npm/install). With Node installed, use `npm install -g doctoc` to install `doctoc` globally. Then just run `doctoc <file>`, e.g.: + +```shell +doctoc DEVELOPER.md +``` + +This will insert HTML comments surrounding an automatically edited region, scanning for headers and creating an appropriately indented TOC tree. Subsequent runs are idempotent, updating the TOC if the file has changed or leaving it untouched if not. + +To run `doctoc` for all markdown files in a particular directory (recursive), use `doctoc some/path`. + +#### Testing CI workflows with `act` + +The [`act`](https://github.com/nektos/act) tool uses Docker to run containerized CI workflows in a simulated GitHub Actions environment. [Docker Desktop](https://www.docker.com/products/docker-desktop/) is required for Mac or Windows and [Docker Engine](https://docs.docker.com/engine/) on Linux. + +With Docker installed and running, run `act -l` from the project root to see available CI workflows. To run all workflows and jobs, just run `act`. To run a particular workflow use `-W`: + +```shell +act -W .github/workflows/ci.yml +``` + +To run a particular job within a workflow, add the `-j` option: + +```shell +act -W .github/workflows/ci.yml -j build +``` + +**Note:** GitHub API rate limits are easy to exceed, especially with job matrices. Authenticated GitHub users have a much higher rate limit: use `-s GITHUB_TOKEN=<your token>` when invoking `act` to provide a personal access token. Note that this will log your token in shell history; leave the value blank for a prompt to enter it more securely. + +The `-n` flag can be used to execute a dry run, which doesn't run anything, just evaluates workflow, job and step definitions. See the [docs](https://github.com/nektos/act#example-commands) for more. + +**Note:** `act` can only run Linux-based container definitions, so Mac or Windows workflows or matrix OS entries will be skipped. + + +## MODFLOW Resources + ++ [MODFLOW and Related Programs](https://water.usgs.gov/ogw/modflow/) ++ [Online guide for MODFLOW-2000](https://water.usgs.gov/nrp/gwsoftware/modflow2000/Guide/) ++ [Online guide for MODFLOW-2005](https://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/) ++ [Online guide for MODFLOW-NWT](https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/) \ No newline at end of file diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000..74bcb56 --- /dev/null +++ b/conftest.py @@ -0,0 +1 @@ +pytest_plugins = ["modflow_devtools.fixtures"] diff --git a/modflow_devtools/__init__.py b/modflow_devtools/__init__.py index 5925821..d4c9db5 100644 --- a/modflow_devtools/__init__.py +++ b/modflow_devtools/__init__.py @@ -1,138 +1,7 @@ -"""modflow_devtools is a Python package containing tools for MODFLOW -development.""" - -from .common_regression import ( - get_example_basedir, - get_example_dirs, - get_home_dir, - get_select_dirs, - get_select_packages, - is_directory_available, - set_mf6_regression, -) - -# modflow_devtools -from .config import ( - __author__, - __date__, - __description__, - __email__, - __maintainer__, - __status__, - __version__, ) -from .framework import running_on_CI, set_teardown_test, testing_framework -from .mftest_context import MFTargetType, MFTestContext, MFTestTargets -from .simulation import Simulation, api_return -from .targets import get_mf6_version, get_target_dictionary, run_exe - -# autotest -from .testing.budget_testing import eval_bud_diff -from .testing.testing import ( - compare, - compare_budget, - compare_concs, - compare_heads, - compare_stages, - compare_swrbudget, - get_entries_from_namefile, - get_input_files, - get_mf6_blockdata, - get_mf6_comparison, - get_mf6_files, - get_mf6_ftypes, - get_mf6_mshape, - get_mf6_nper, - get_namefiles, - get_sim_name, - model_setup, - setup_comparison, - setup_mf6, - setup_mf6_comparison, - teardown, -) -from
.utilities.binary_file_writer import ( - uniform_flow_field, - write_budget, - write_head, -) -from .utilities.cross_section import ( - calculate_rectchan_mannings_discharge, - get_depths, -) -from .utilities.disu_util import get_disu_kwargs -from .utilities.download import ( - download_and_unzip, - get_repo_assets, - getmfexes, - getmfnightly, - repo_latest_version, - zip_all, -) -from .utilities.mftest_exe import MFTestExe -from .utilities.usgsprograms import usgs_program_data - -# define public interfaces -__all__ = [ - "__version__", - # common_regression - "get_example_basedir", - "get_example_dirs", - "get_home_dir", - "get_select_dirs", - "get_select_packages", - "is_directory_available", - "set_mf6_regression", - # framework - "running_on_CI", - "set_teardown_test", - "testing_framework", - # context - "MFTargetType", - "MFTestTargets", - "MFTestContext", - # simulation - "Simulation", - "api_return", - # targets - "run_exe", - "get_mf6_version", - "get_target_dictionary", - # testing - "eval_bud_diff", - "model_setup", - "setup_comparison", - "teardown", - "get_namefiles", - "get_entries_from_namefile", - "get_sim_name", - "get_input_files", - "compare_budget", - "compare_swrbudget", - "compare_heads", - "compare_concs", - "compare_stages", - "compare", - "setup_mf6", - "setup_mf6_comparison", - "get_mf6_comparison", - "get_mf6_files", - "get_mf6_blockdata", - "get_mf6_ftypes", - "get_mf6_mshape", - "get_mf6_nper", - # utilities - "uniform_flow_field", - "write_head", - "write_budget", - "get_depths", - "calculate_rectchan_mannings_discharge", - "get_disu_kwargs", - "MFTestExe", - "usgs_program_data", - "download_and_unzip", - "getmfexes", - "repo_latest_version", - "get_repo_assets", - "zip_all", -] +__author__ = "Joseph D. Hughes" +__date__ = "March 2, 2022" +__version__ = "0.0.1" +__maintainer__ = "Joseph D. Hughes" +__email__ = "jdhughes@usgs.gov" +__status__ = "Production" +__description__ = """Python tools for MODFLOW development and testing.""" diff --git a/modflow_devtools/case.py b/modflow_devtools/case.py new file mode 100644 index 0000000..cd6e7a2 --- /dev/null +++ b/modflow_devtools/case.py @@ -0,0 +1,39 @@ +from types import SimpleNamespace + + +class Case(SimpleNamespace): + """ + Minimal container for a reusable test case. + """ + + def __init__(self, **kwargs): + if "name" not in kwargs: + raise ValueError("Case name is required") + + # set defaults + if "xfail" not in kwargs: + kwargs["xfail"] = False + + super().__init__(**kwargs) + + def __repr__(self): + return self.name + + def copy(self): + """ + Copies the test case. + """ + + return Case(**self.__dict__.copy()) + + def copy_update(self, **kwargs): + """ + A utility method for copying a test case with changes. + Recommended for dynamically generating similar cases.
+ """ + + cpy = self.__dict__.copy() + cpy.update(kwargs) + return SimpleNamespace(**cpy) diff --git a/modflow_devtools/common_regression.py b/modflow_devtools/common_regression.py deleted file mode 100644 index 89685db..0000000 --- a/modflow_devtools/common_regression.py +++ /dev/null @@ -1,156 +0,0 @@ -import os -import sys - -import flopy - -from .testing.testing import get_mf6_ftypes, get_namefiles - - -def get_home_dir(): - # determine if CI run - is_CI = "CI" in os.environ - - home = os.path.expanduser("~") - - if is_CI: - if sys.platform.lower() == "win32": - home = os.path.normpath(os.path.join(os.getcwd(), "..", "..")) - else: - cwd_pth = os.getcwd() - - # convert current working directory to a list - cwd_list = cwd_pth.split(sep=os.path.sep) - - # add leading path separator back into list - for idx, pth in enumerate(cwd_list): - if len(pth) < 1: - cwd_list[idx] = os.path.sep - if pth.endswith(":") and sys.platform.lower() == "win32": - cwd_list[idx] += os.path.sep - - ipos = 0 - for idx, s in enumerate(cwd_list): - if s.lower().startswith("modflow6"): - ipos = idx - break - - home = os.path.join(*cwd_list[:ipos]) - - print(f"HOME: {home}") - - return home - - -def set_mf6_regression(): - mf6_regression = True - for arg in sys.argv: - if arg.lower() in ("--original_regression", "-oreg"): - mf6_regression = False - break - return mf6_regression - - -def is_directory_available(example_basedir): - available = False - if example_basedir is not None: - available = os.path.isdir(example_basedir) - if not available: - print(f'"{example_basedir}" does not exist') - print(f"no need to run {os.path.basename(__file__)}") - return available - - -def get_example_basedir(home, find_dir, subdir=None): - example_basedir = None - for root, dirs, files in os.walk(home): - for d in dirs: - if d == find_dir or d == find_dir + ".git": - example_basedir = os.path.join(root, d) - if subdir is not None: - example_basedir = os.path.join(example_basedir, subdir) - break - if example_basedir is not None: - example_basedir = os.path.abspath(example_basedir) - print(f"Example base directory: {example_basedir}") - break - return example_basedir - - -def get_example_dirs(example_basedir, exclude, prefix="test", find_sim=True): - example_dirs = [ - d - for d in os.listdir(example_basedir) - if prefix in d and d not in exclude - ] - - # make sure mfsim.nam is present in each directory - if find_sim: - remove_dirs = [] - # add_dirs = [] - for temp_dir in example_dirs: - epth = os.path.join(example_basedir, temp_dir) - fpth = os.path.join(epth, "mfsim.nam") - if not os.path.isfile(fpth): - remove_dirs.append(temp_dir) - # for sub_dir in ("mf6gwf", "mf6gwt"): - # tpth = os.path.join(epth, sub_dir) - # fpth = os.path.join(tpth, "mfsim.nam") - # if os.path.isfile(fpth): - # add_dirs.append(os.path.join(temp_dir, sub_dir)) - - for remove_dir in remove_dirs: - example_dirs.remove(remove_dir) - - # example_dirs += add_dirs - - # sort in numerical order for case sensitive os - example_dirs = sorted( - example_dirs, key=lambda v: (v.upper(), v[0].islower()) - ) - - return example_dirs - - -def get_select_dirs(select_dirs, dirs): - found_dirs = [] - for d in select_dirs: - if d.endswith("*"): - for test_dir in dirs: - if test_dir.startswith(d.replace("*", "")): - found_dirs.append(test_dir) - elif d.endswith("+"): - dd = d.replace("+", "") - for test_dir in dirs: - sorted_list = sorted([dd, test_dir], reverse=True) - if sorted_list[0] == test_dir: - found_dirs.append(test_dir) - elif d.endswith("-"): - dd = d.replace("-", 
"") - for test_dir in dirs: - sorted_list = sorted([dd, test_dir]) - if sorted_list[0] == test_dir or dd in sorted_list[0]: - found_dirs.append(test_dir) - else: - if d in dirs: - found_dirs.append(d) - - return found_dirs - - -def get_select_packages(select_packages, exdir, dirs): - found_dirs = [] - for d in dirs: - pth = os.path.join(exdir, d) - namefiles = get_namefiles(pth) - ftypes = [] - for namefile in namefiles: - ftype = get_mf6_ftypes(namefile, select_packages) - if ftype not in ftypes: - ftypes += ftype - if len(ftypes) > 0: - ftypes = [item.upper() for item in ftypes] - for pak in select_packages: - if pak in ftypes: - found_dirs.append(d) - break - return found_dirs diff --git a/modflow_devtools/config.py b/modflow_devtools/config.py deleted file mode 100644 index 6194a26..0000000 --- a/modflow_devtools/config.py +++ /dev/null @@ -1,10 +0,0 @@ -__author__ = "Joseph D. Hughes" -__date__ = "March 2, 2022" -__version__ = "0.0.1" -__maintainer__ = "Joseph D. Hughes" -__email__ = "jdhughes@usgs.gov" -__status__ = "Production" -__description__ = """ -This is the modflow_devtools package that provides functionality used in the -MODFLOW development environment. -""" diff --git a/modflow_devtools/mftest_context.py b/modflow_devtools/context.py similarity index 95% rename from modflow_devtools/mftest_context.py rename to modflow_devtools/context.py index f667b4a..d292beb 100644 --- a/modflow_devtools/mftest_context.py +++ b/modflow_devtools/context.py @@ -1,12 +1,8 @@ -import json import os import subprocess import sys from enum import Enum - -import flopy - -from .utilities.mftest_exe import MFTestExe +from shutil import which class MFTargetType(Enum): @@ -128,9 +124,9 @@ def _target_pth(self, target, target_t=None, is_lib=False): path = self._releasebin if self._use_path: - exe_exists = flopy.which(target) + exe_exists = which(target) else: - exe_exists = flopy.which(target, path=path) + exe_exists = which(target, path=path) if ( exe_exists is None @@ -228,12 +224,6 @@ def __init__( use_path=use_path, ) - self._exe = MFTestExe( - releasebin=self._releasebin, - builtbin=builtbin, - targets=self._targets, - ) - self._update_context() def get_target_dictionary(self): diff --git a/modflow_devtools/utilities/download.py b/modflow_devtools/download.py similarity index 77% rename from modflow_devtools/utilities/download.py rename to modflow_devtools/download.py index 7e24c88..1ea1bd1 100644 --- a/modflow_devtools/utilities/download.py +++ b/modflow_devtools/download.py @@ -16,12 +16,13 @@ import shutil import sys import tarfile -import time import timeit +import urllib.request +from os import PathLike +from pathlib import Path +from typing import Optional from zipfile import ZIP_DEFLATED, ZipFile, ZipInfo -import requests - class MFZipFile(ZipFile): """ZipFile file attributes are not being preserved. 
This class preserves @@ -179,101 +180,32 @@ def compressall(path, file_pths=None, dir_pths=None, patterns=None): return success -def _request_get(url, verify=True, timeout=1, max_requests=10, verbose=False): - """Make a url request - - Parameters - ---------- - url : str - url address for the zip file - verify : bool - boolean indicating if the url request should be verified - (default is True) - timeout : int - url request time out length (default is 1 seconds) - max_requests : int - number of url download request attempts (default is 10) - verbose : bool - boolean indicating if output will be printed to the terminal - (default is False) - - Returns - ------- - req : request object - request object for url - - """ - for idx in range(max_requests): - if verbose: - msg = f"open request attempt {idx + 1} of {max_requests}" - print(msg) - try: - req = requests.get( - url, stream=True, verify=verify, timeout=timeout - ) - except: - if idx < max_requests - 1: - time.sleep(13) - continue - else: - msg = "Cannot open request from:\n" + f" {url}\n\n" - print(msg) - raise requests.HTTPError(msg) - - # successful request - break - - return req - - -def _request_header(url, max_requests=10, verbose=False): - """Get the headers from a url - - Parameters - ---------- - url : str - url address for the zip file - max_requests : int - number of url download request attempts (default is 10) - verbose : bool - boolean indicating if output will be printed to the terminal - (default is False) - - Returns - ------- - header : request header object - request header object for url + Get urllib.request.Request, with parameters and headers. + This bears a GitHub API authentication token if github.com is + in the URL and the GITHUB_TOKEN environment variable is set. """ - for idx in range(max_requests): - if verbose: - msg = f"open request attempt {idx + 1} of {max_requests}" - print(msg) - - header = requests.head(url, allow_redirects=True) - if header.status_code != 200: - if idx < max_requests - 1: - time.sleep(13) - continue - else: - msg = "Cannot open request from:\n" + f" {url}\n\n" - print(msg) - header.raise_for_status() + if isinstance(params, dict): + if len(params) > 0: + url += "?" + urllib.parse.urlencode(params) + else: + raise TypeError("params must be a dict") + headers = {} - # successful header request - break + if "github.com" in url: + github_token = os.environ.get("GITHUB_TOKEN", None) + if github_token: + headers["Authorization"] = f"Bearer {github_token}" - return header + return urllib.request.Request(url, headers=headers) def download_and_unzip( - url, - pth="./", + url: str, + path: Optional[PathLike] = None, delete_zip=True, - verify=True, - timeout=30, - max_requests=10, - chunk_size=2048000, verbose=False, ): """Download and unzip a zip file from a url.
@@ -282,19 +214,11 @@ ---------- url : str url address for the zip file - pth : str + path : PathLike path where the zip file will be saved (default is the current path) delete_zip : bool boolean indicating if the zip file should be deleted after it is unzipped (default is True) - verify : bool - boolean indicating if the url request should be verified - timeout : int - url request time out length (default is 30 seconds) - max_requests : int - number of url download request attempts (default is 10) - chunk_size : int - maximum url download request chunk size (default is 2048000 bytes) verbose : bool boolean indicating if output will be printed to the terminal @@ -303,35 +227,32 @@ """ - # create download directory - if not os.path.exists(pth): - if verbose: - print(f"Creating the directory:\n {pth}") - os.makedirs(pth) + path = Path(path if path else os.getcwd()) + path.mkdir(parents=True, exist_ok=True) if verbose: - print(f"Attempting to download the file:\n {url}") - - # define the filename - file_name = os.path.join(pth, url.split("/")[-1]) + print(f"Downloading {url}") # download the file success = False tic = timeit.default_timer() - # open request - req = _request_get( - url, - verify=verify, - timeout=timeout, - max_requests=max_requests, - verbose=verbose, + def report(chunk, size, total): + complete = chunk * size + # guard against servers that don't report a total size + if verbose and total > 0: + percent = round(complete / total * 100) + print(f"{percent}% complete ({complete} bytes of {total})") + + # download zip file + file_path = path / url.split("/")[-1] + _, headers = urllib.request.urlretrieve( + url, filename=str(file_path), reporthook=report ) # get content length, if available tag = "Content-length" - if tag in req.headers: - file_size = req.headers[tag] + if tag in headers: + file_size = headers[tag] len_file_size = len(file_size) file_size = int(file_size) @@ -343,111 +264,38 @@ else: file_size = 0.0 - # download data from url - for idx in range(max_requests): - # print download attempt message - if verbose: - print(f" download attempt: {idx + 1}") - - # connection established - download the file - download_size = 0 - try: - with open(file_name, "wb") as f: - for chunk in req.iter_content(chunk_size=chunk_size): - if chunk: - # increment the counter - download_size += len(chunk) - - # write the chunk - f.write(chunk) - - # write information to the screen - if verbose: - if file_size > 0: - download_percent = float( - download_size - ) / float(file_size) - msg = ( - " downloaded " - + sbfmt.format(bfmt.format(download_size)) - + " of " - + bfmt.format(int(file_size)) - + " bytes" - + f" ({download_percent:10.4%})" - ) - else: - msg = ( - " downloaded " - + sbfmt.format(bfmt.format(download_size)) - + " bytes" - ) - print(msg) - else: - sys.stdout.write(".") - sys.stdout.flush() - - success = True - except: - # reestablish request - req = _request_get( - url, - verify=verify, - timeout=timeout, - max_requests=max_requests, - verbose=verbose, - ) - - # try to download the data again - continue - - # terminate the download attempt loop - if success: - break - # write the total download time toc = timeit.default_timer() - tsec = toc - tic + tsec = round(toc - tic, 2) if verbose: print(f"\ntotal download time: {tsec} seconds") - if success: - if file_size > 0: - if verbose: - print(f"download speed: {file_size / (1e6 * tsec)} MB/s") - else: - msg = f"could not download...{url}" - raise ConnectionError(msg) - # Unzip the file, and delete zip file if successful.
- if "zip" in os.path.basename(file_name) or "exe" in os.path.basename( - file_name - ): - z = MFZipFile(file_name) + if "zip" in file_path.suffix or "exe" in file_path.suffix: + z = MFZipFile(file_path) try: - # write a message - if not verbose: - sys.stdout.write("\n") - print(f"uncompressing...'{file_name}'") + if verbose: + print(f"Uncompressing: {file_path}") # extract the files - z.extractall(pth) + z.extractall(str(path)) except: - p = "Could not unzip the file. Stopping." + p = "Could not unzip the file. Stopping." raise Exception(p) z.close() - elif "tar" in os.path.basename(file_name): - ar = tarfile.open(file_name) - ar.extractall(path=pth) + elif "tar" in file_path.suffix: + ar = tarfile.open(file_path) + ar.extractall(path=str(path)) ar.close() # delete the zipfile if delete_zip: if verbose: - print("Deleting the zipfile...") - os.remove(file_name) + print(f"Deleting zipfile {file_path}") + file_path.unlink() if verbose: - print("Done downloading and extracting...\n") + print(f"Done downloading and extracting {file_path.name} to {path}") return success @@ -963,5 +811,3 @@ def getmfnightly( if verbose: print("Removing folder " + download_dir) shutil.rmtree(download_dir) - - return diff --git a/modflow_devtools/executables.py b/modflow_devtools/executables.py new file mode 100644 index 0000000..267bb64 --- /dev/null +++ b/modflow_devtools/executables.py @@ -0,0 +1,96 @@ +import sys +from os import PathLike +from pathlib import Path +from shutil import which +from types import SimpleNamespace +from typing import Dict, Optional +from warnings import warn + +from modflow_devtools.misc import get_suffixes, run_cmd + + +class Executables(SimpleNamespace): + """ + Container mapping executable names to their paths. + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @staticmethod + def get_version( + exe="mf6", path: PathLike = None, flag: str = "-v" + ) -> Optional[str]: + """Get the version number of an executable.""" + + pth = Executables.get_path(exe, path) + if not pth: + warn( + f"Executable {exe} not found" + + ("" if not pth else f" at path: {pth}") + ) + return None + + out, err, ret = run_cmd(exe, flag) + if ret == 0: + out = "".join(out).strip() + return out.split(":")[1].strip() + else: + return None + + @staticmethod + def get_path(exe: str = "mf6", path: PathLike = None) -> Optional[Path]: + pth = None + found = None + if path is not None: + pth = Path(path) + found = which(exe, path=str(pth)) + if found is None: + found = which(exe) + + if found is None: + warn( + f"Executable {exe} not found" + + ("" if not pth else f" at path: {pth}") + ) + return found + + return Path(found) + + def as_dict(self) -> Dict[str, Path]: + """ + Returns a dictionary mapping executable names to paths. 
+ """ + + return self.__dict__.copy() + + +def build_default_exe_dict(bin_path: PathLike) -> Dict[str, Path]: + p = Path(bin_path) + d = dict() + + # paths to executables for previous versions of MODFLOW + dl_bin = p / "downloaded" + rb_bin = p / "rebuilt" + + # get platform-specific filename extensions + ext, so = get_suffixes(sys.platform) + + # downloaded executables + d["mf2005"] = Executables.get_path(f"mf2005dbl{ext}", dl_bin) + d["mfnwt"] = Executables.get_path(f"mfnwtdbl{ext}", dl_bin) + d["mfusg"] = Executables.get_path(f"mfusgdbl{ext}", dl_bin) + d["mflgr"] = Executables.get_path(f"mflgrdbl{ext}", dl_bin) + d["mf2005s"] = Executables.get_path(f"mf2005{ext}", dl_bin) + d["mt3dms"] = Executables.get_path(f"mt3dms{ext}", dl_bin) + + # executables rebuilt from last release + d["mf6_regression"] = Executables.get_path(f"mf6{ext}", rb_bin) + + # local development version + d["mf6"] = p / f"mf6{ext}" + d["libmf6"] = p / f"libmf6{so}" + d["mf5to6"] = p / f"mf5to6{ext}" + d["zbud6"] = p / f"zbud6{ext}" + + return d diff --git a/modflow_devtools/fixtures.py b/modflow_devtools/fixtures.py new file mode 100644 index 0000000..ce2fd1b --- /dev/null +++ b/modflow_devtools/fixtures.py @@ -0,0 +1,324 @@ +from collections import OrderedDict +from itertools import groupby +from os import PathLike, environ +from pathlib import Path +from shutil import copytree +from typing import Dict, List, Optional + +import pytest +from modflow_devtools.misc import get_mf6_ftypes, get_models + +# temporary directory fixtures + + +@pytest.fixture(scope="function") +def function_tmpdir(tmpdir_factory, request) -> Path: + node = ( + request.node.name.replace("/", "_") + .replace("\\", "_") + .replace(":", "_") + ) + temp = Path(tmpdir_factory.mktemp(node)) + yield Path(temp) + + keep = request.config.getoption("--keep") + if keep: + copytree(temp, Path(keep) / temp.name) + + keep_failed = request.config.getoption("--keep-failed") + if keep_failed and request.node.rep_call.failed: + copytree(temp, Path(keep_failed) / temp.name) + + +@pytest.fixture(scope="class") +def class_tmpdir(tmpdir_factory, request) -> Path: + assert ( + request.cls is not None + ), "Class-scoped temp dir fixture must be used on class" + temp = Path(tmpdir_factory.mktemp(request.cls.__name__)) + yield temp + + keep = request.config.getoption("--keep") + if keep: + copytree(temp, Path(keep) / temp.name) + + +@pytest.fixture(scope="module") +def module_tmpdir(tmpdir_factory, request) -> Path: + temp = Path(tmpdir_factory.mktemp(request.module.__name__)) + yield temp + + keep = request.config.getoption("--keep") + if keep: + copytree(temp, Path(keep) / temp.name) + print(list((Path(keep) / temp.name).rglob("*"))) + + +@pytest.fixture(scope="session") +def session_tmpdir(tmpdir_factory, request) -> Path: + temp = Path(tmpdir_factory.mktemp(request.session.name)) + yield temp + + keep = request.config.getoption("--keep") + if keep: + copytree(temp, Path(keep) / temp.name) + + +# environment-dependent fixtures + + +@pytest.fixture +def repos_path() -> Optional[Path]: + """Path to directory containing test model and example repositories""" + return environ.get("REPOS_PATH", None) + + +# pytest configuration hooks + + +def pytest_addoption(parser): + parser.addoption( + "-K", + "--keep", + action="store", + default=None, + help="Move the contents of temporary test directories to correspondingly named subdirectories at the given " + "location after tests complete. This option can be used to exclude test results from automatic cleanup, " + "e.g. 
for manual inspection. The provided path is created if it does not already exist. An error is " + "thrown if any matching files already exist.", + ) + + parser.addoption( + "--keep-failed", + action="store", + default=None, + help="Move the contents of temporary test directories to correspondingly named subdirectories at the given " + "location if the test case fails. This option automatically saves the outputs of failed tests in the " + "given location. The path is created if it doesn't already exist. An error is thrown if files with the " + "same names already exist in the given location.", + ) + + parser.addoption( + "-S", + "--smoke", + action="store_true", + default=False, + help="Run only smoke tests (should complete in <1 minute).", + ) + + parser.addoption( + "-M", + "--meta", + action="store", + metavar="NAME", + help="Indicates a test should only be run by other tests (e.g., to test framework or fixtures).", + ) + + parser.addoption( + "--model", + action="append", + type=str, + help="Select a subset of models to run.", + ) + + parser.addoption( + "--package", + action="append", + type=str, + help="Select a subset of packages to run.", + ) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", + "meta(name): run only by other tests", + ) + + +def pytest_runtest_setup(item): + # skip meta-tests unless specified + meta = item.config.getoption("--meta") + metagroups = [mark.args[0] for mark in item.iter_markers(name="meta")] + if metagroups and meta not in metagroups: + pytest.skip() + + # smoke tests are \ {slow U example U regression} + smoke = item.config.getoption("--smoke") + slow = list(item.iter_markers(name="slow")) + example = list(item.iter_markers(name="example")) + regression = list(item.iter_markers(name="regression")) + if smoke and (slow or example or regression): + pytest.skip() + + +@pytest.hookimpl(hookwrapper=True, tryfirst=True) +def pytest_runtest_makereport(item, call): + # this is necessary so temp dir fixtures can + # inspect test results and check for failure + # (see https://doc.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures) + + outcome = yield + rep = outcome.get_result() + + # report attribute for each phase (setup, call, teardown) + # we're only interested in result of the function call + setattr(item, "rep_" + rep.when, rep) + + +def pytest_generate_tests(metafunc): + models_selected = metafunc.config.getoption("--model", None) + packages_selected = metafunc.config.getoption("--package", None) + repos_path = environ.get("REPOS_PATH") + + key = "test_model_mf6" + if key in metafunc.fixturenames: + models = ( + get_models( + Path(repos_path) / "modflow6-testmodels" / "mf6", + prefix="test", + excluded=["test205_gwtbuy-henrytidal"], + selected=models_selected, + packages=packages_selected, + ) + if repos_path + else [] + ) + metafunc.parametrize(key, models, ids=[m.name for m in models]) + + key = "test_model_mf5to6" + if key in metafunc.fixturenames: + models = ( + get_models( + Path(repos_path) / "modflow6-testmodels" / "mf5to6", + prefix="test", + namefile="*.nam", + excluded=["test205_gwtbuy-henrytidal"], + selected=models_selected, + packages=packages_selected, + ) + if repos_path + else [] + ) + metafunc.parametrize(key, models, ids=[m.name for m in models]) + + key = "large_test_model" + if key in metafunc.fixturenames: + models = ( + get_models( + Path(repos_path) / "modflow6-largetestmodels", + prefix="test", + namefile="*.nam", + excluded=[], + selected=models_selected, + 
packages=packages_selected, + ) + if repos_path + else [] + ) + metafunc.parametrize(key, models, ids=[m.name for m in models]) + + key = "example_scenario" + if key in metafunc.fixturenames: + + def example_namfile_is_nested(namfile_path: PathLike) -> bool: + p = Path(namfile_path) + if not p.is_file() or not p.name.endswith(".nam"): + raise ValueError(f"Expected namefile path, got {p}") + + return p.parent.parent.name != "examples" + + def example_name_from_namfile_path(path: PathLike) -> str: + p = Path(path) + if not p.is_file() or not p.name.endswith(".nam"): + raise ValueError(f"Expected namefile path, got {p}") + + return ( + p.parent.parent.name + if example_namfile_is_nested(p) + else p.parent.name + ) + + def group_examples(namefile_paths) -> Dict[str, List[Path]]: + d = OrderedDict() + + for name, paths in groupby( + namefile_paths, key=example_name_from_namfile_path + ): + # sort alphabetically (gwf < gwt) + nfpaths = sorted(list(paths)) + + # skip if no models found + if len(nfpaths) == 0: + continue + + d[name] = nfpaths + + return d + + def get_examples(): + examples_excluded = [ + "ex-gwf-csub-p02c", + "ex-gwt-gwtgwt-mt3dms-p10", + ] + + # find and filter namfiles (sorted, so groupby sees each + # example's namefiles contiguously) + namfiles = sorted( + ( + Path(repos_path) / "modflow6-examples" / "examples" + ).rglob("mfsim.nam") + ) + namfiles = [ + p + for p in namfiles + if (not any(e in str(p) for e in examples_excluded)) + ] + + # group example scenarios with multiple models + examples = group_examples(namfiles) + + # filter by example name (optional) + if models_selected: + examples = { + name: nfps + for name, nfps in examples.items() + if any(s in name for s in models_selected) + } + + # filter by package (optional) + if packages_selected: + filtered = [] + for name, namefiles in examples.items(): + ftypes = [] + for namefile in namefiles: + ftype = get_mf6_ftypes(namefile, packages_selected) + if ftype not in ftypes: + ftypes += ftype + if len(ftypes) > 0: + ftypes = [item.upper() for item in ftypes] + for pkg in packages_selected: + if pkg in ftypes: + filtered.append(name) + break + examples = { + name: nfps + for name, nfps in examples.items() + if name in filtered + } + + # remove mf6gwf and mf6gwt + examples = { + name: nfps + for name, nfps in examples.items() + if name not in ["mf6gwf", "mf6gwt"] + } + + return examples + + example_scenarios = get_examples() if repos_path else dict() + metafunc.parametrize( + key, + [(name, nfps) for name, nfps in example_scenarios.items()], + ids=[name for name, ex in example_scenarios.items()], + ) diff --git a/modflow_devtools/framework.py b/modflow_devtools/framework.py index 1a9c530..215db7f 100644 --- a/modflow_devtools/framework.py +++ b/modflow_devtools/framework.py @@ -1,86 +1,876 @@ import os -import sys - -import flopy - - -def running_on_CI(): - return "TRAVIS" in os.environ or "CI" in os.environ - - -def set_teardown_test(): - teardown = True - for idx, arg in enumerate(sys.argv): - if arg.lower() == "--keep": - teardown = False - return teardown - - -class testing_framework(object): - def __init__(self): - return - - def build_mf6_models(self, build_function, idx, exdir): - """ - Build base and regression MODFLOW 6 models - - Parameters - ---------- - build_function : function - user defined function that builds a base model and optionally - builds a regression model. If a regression model is not built - then None must be returned from the function for the regression - model.
- idx : int - counter that corresponds to exdir entry - exdir : str - path to regression model files - """ - base, regression = build_function(idx, exdir) - base.write_simulation() - if regression is not None: - if isinstance(regression, flopy.mf6.MFSimulation): - regression.write_simulation() +import shutil + +ignore_ext = ( + ".hds", + ".hed", + ".bud", + ".cbb", + ".cbc", + ".ddn", + ".ucn", + ".glo", + ".lst", + ".list", + ".gwv", + ".mv", + ".out", +) + + +def model_setup(namefile, dst, remove_existing=True, extrafiles=None): + """Setup MODFLOW-based model files for autotests. + + Parameters + ---------- + namefile : str + MODFLOW-based model name file. + dst : str + destination path for comparison model or file(s) + remove_existing : bool + boolean indicating if an existing comparison model or file(s) should + be replaced (default is True) + extrafiles : str or list of str + list of extra files to include in the comparison + + Returns + ------- + + """ + # Construct src pth from namefile or lgr file + src = os.path.dirname(namefile) + + # Create the destination folder, if required + create_dir = False + if os.path.exists(dst): + if remove_existing: + print("Removing folder " + dst) + shutil.rmtree(dst) + create_dir = True + else: + create_dir = True + if create_dir: + os.mkdir(dst) + + # determine if a namefile is a lgr control file - get individual + # name files out of the lgr control file + namefiles = [namefile] + ext = os.path.splitext(namefile)[1] + if ".lgr" in ext.lower(): + lines = [line.rstrip("\n") for line in open(namefile)] + for line in lines: + if len(line) < 1: + continue + if line[0] == "#": + continue + t = line.split() + if ".nam" in t[0].lower(): + fpth = os.path.join(src, t[0]) + namefiles.append(fpth) + + # Make list of files to copy + files2copy = [] + for fpth in namefiles: + files2copy.append(os.path.basename(fpth)) + ext = os.path.splitext(fpth)[1] + # copy additional files contained in the name file and + # associated package files + if ext.lower() == ".nam": + fname = os.path.abspath(fpth) + files2copy = files2copy + get_input_files(fname) + + if extrafiles is not None: + if isinstance(extrafiles, str): + extrafiles = [extrafiles] + for fl in extrafiles: + files2copy.append(os.path.basename(fl)) + + # Copy the files + for f in files2copy: + srcf = os.path.join(src, f) + dstf = os.path.join(dst, f) + + # Check to see if dstf is going into a subfolder, and create that + # subfolder if it doesn't exist + sf = os.path.dirname(dstf) + if not os.path.isdir(sf): + os.makedirs(sf) + + # Now copy the file + if os.path.exists(srcf): + print("Copy file '" + srcf + "' -> '" + dstf + "'") + shutil.copy(srcf, dstf) + else: + print(srcf + " does not exist") + + +def setup_comparison(namefile, dst, remove_existing=True): + """Setup a comparison model or comparison file(s) for a MODFLOW-based + model.
+
+
+def setup_comparison(namefile, dst, remove_existing=True):
+    """Set up a comparison model or comparison file(s) for a MODFLOW-based
+    model.
+
+    Parameters
+    ----------
+    namefile : str
+        MODFLOW-based model name file.
+    dst : str
+        destination path for comparison model or file(s)
+    remove_existing : bool
+        boolean indicating if an existing comparison model or file(s) should
+        be replaced (default is True)
+
+    Returns
+    -------
+
+    """
+    # Construct src pth from namefile
+    src = os.path.dirname(namefile)
+    action = None
+    for root, dirs, files in os.walk(src):
+        dl = [d.lower() for d in dirs]
+        if any(".cmp" in s for s in dl):
+            idx = None
+            for jdx, d in enumerate(dl):
+                if ".cmp" in d:
+                    idx = jdx
+                    break
+            if idx is not None:
+                if "mf2005.cmp" in dl[idx] or "mf2005" in dl[idx]:
+                    action = dirs[idx]
+                elif "mfnwt.cmp" in dl[idx] or "mfnwt" in dl[idx]:
+                    action = dirs[idx]
+                elif "mfusg.cmp" in dl[idx] or "mfusg" in dl[idx]:
+                    action = dirs[idx]
+                elif "mf6.cmp" in dl[idx] or "mf6" in dl[idx]:
+                    action = dirs[idx]
+                elif "libmf6.cmp" in dl[idx] or "libmf6" in dl[idx]:
+                    action = dirs[idx]
+                else:
+                    action = dirs[idx]
+                break
+    if action is not None:
+        dst = os.path.join(dst, f"{action}")
+        if not os.path.isdir(dst):
+            try:
+                os.mkdir(dst)
+            except:
+                print("Could not make " + dst)
+        # clean directory
+        else:
+            print(f"cleaning...{dst}")
+            for root, dirs, files in os.walk(dst):
+                for f in files:
+                    tpth = os.path.join(root, f)
+                    print(f"  removing...{tpth}")
+                    os.remove(tpth)
+                for d in dirs:
+                    tdir = os.path.join(root, d)
+                    print(f"  removing...{tdir}")
+                    shutil.rmtree(tdir)
+        # copy files
+        cmppth = os.path.join(src, action)
+        files = os.listdir(cmppth)
+        files2copy = []
+        if action.lower() == ".cmp":
+            for file in files:
+                if ".cmp" in os.path.splitext(file)[1].lower():
+                    files2copy.append(os.path.join(cmppth, file))
+            for srcf in files2copy:
+                f = os.path.basename(srcf)
+                dstf = os.path.join(dst, f)
+                # Now copy the file
+                if os.path.exists(srcf):
+                    print("Copy file '" + srcf + "' -> '" + dstf + "'")
+                    shutil.copy(srcf, dstf)
+                else:
+                    print(srcf + " does not exist")
+        else:
+            for file in files:
+                if ".nam" in os.path.splitext(file)[1].lower():
+                    files2copy.append(
+                        os.path.join(cmppth, os.path.basename(file))
+                    )
+                    nf = os.path.join(src, action, os.path.basename(file))
+                    model_setup(nf, dst, remove_existing=remove_existing)
+                    break
+
+    return action
+
+
+def get_input_files(namefile):
+    """Return a list of all the input files in this model.
+
+    Parameters
+    ----------
+    namefile : str
+        path to a MODFLOW-based model name file
+
+    Returns
+    -------
+    filelist : list
+        list of MODFLOW-based model input files
+
+    """
+    srcdir = os.path.dirname(namefile)
+    filelist = []
+    fname = os.path.join(srcdir, namefile)
+    with open(fname, "r") as f:
+        lines = f.readlines()
+
+    for line in lines:
+        ll = line.strip().split()
+        # name file entries have at least three tokens (ftype, unit, fname);
+        # ll[2] is read below, so require three tokens rather than two
+        if len(ll) < 3:
+            continue
+        if line.strip()[0] in ["#", "!"]:
+            continue
+        ext = os.path.splitext(ll[2])[1]
+        if ext.lower() not in ignore_ext:
+            if len(ll) > 3:
+                if "replace" in ll[3].lower():
+                    continue
+            filelist.append(ll[2])
+
+    # Now go through every file and look for other files to copy,
+    # such as 'OPEN/CLOSE'.  If found, then add that file to the
+    # list of files to copy.
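+    # e.g. a package file may contain an array control record such as
+    # (hypothetical file name):
+    #
+    #     OPEN/CLOSE  bottom.txt  1.0  (FREE)  -1
+    #
+    # in which case bottom.txt must be copied along with the model.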
+ otherfiles = [] + for fname in filelist: + fname = os.path.join(srcdir, fname) + try: + f = open(fname, "r") + for line in f: + + # Skip invalid lines + ll = line.strip().split() + if len(ll) < 2: + continue + if line.strip()[0] in ["#", "!"]: + continue + + if "OPEN/CLOSE" in line.upper(): + for i, s in enumerate(ll): + if "OPEN/CLOSE" in s.upper(): + stmp = ll[i + 1] + stmp = stmp.replace('"', "") + stmp = stmp.replace("'", "") + otherfiles.append(stmp) + break + except: + print(fname + " does not exist") + + filelist = filelist + otherfiles + + return filelist + + +def get_namefiles(pth, exclude=None): + """Search through a path (pth) for all .nam files. + + Parameters + ---------- + pth : str + path to model files + exclude : str or lst + File or list of files to exclude from the search (default is None) + + Returns + ------- + namefiles : lst + List of namefiles with paths + + """ + namefiles = [] + for root, _, files in os.walk(pth): + namefiles += [ + os.path.join(root, file) for file in files if file.endswith(".nam") + ] + if exclude is not None: + if isinstance(exclude, str): + exclude = [exclude] + exclude = [e.lower() for e in exclude] + pop_list = [] + for namefile in namefiles: + for e in exclude: + if e in namefile.lower(): + pop_list.append(namefile) + for e in pop_list: + namefiles.remove(e) + + return namefiles + + +def get_sim_name(namefiles, rootpth=None): + """Get simulation name. + + Parameters + ---------- + namefiles : str or list of strings + path(s) to MODFLOW-based model name files + rootpth : str + optional root directory path (default is None) + + Returns + ------- + simname : list + list of namefiles without the file extension + + """ + if isinstance(namefiles, str): + namefiles = [namefiles] + sim_name = [] + for namefile in namefiles: + t = namefile.split(os.sep) + if rootpth is None: + idx = -1 + else: + idx = t.index(os.path.split(rootpth)[1]) + + # build dst with everything after the rootpth and before + # the namefile file name. + dst = "" + if idx < len(t): + for d in t[idx + 1 : -1]: + dst += f"{d}_" + + # add namefile basename without extension + dst += t[-1].replace(".nam", "") + sim_name.append(dst) + + return sim_name + + +def setup_mf6( + src, dst, mfnamefile="mfsim.nam", extrafiles=None, remove_existing=True +): + """Copy all of the MODFLOW 6 input files from the src directory to the dst + directory. 
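+
+    The simulation name file, the input files it references (including any
+    nested model name files and external files), and optional extra files
+    are all copied.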
+
+    Parameters
+    ----------
+    src : str
+        directory path with original MODFLOW 6 input files
+    dst : str
+        directory path that original MODFLOW 6 input files will be copied to
+    mfnamefile : str
+        optional MODFLOW 6 simulation name file (default is mfsim.nam)
+    extrafiles : str or list of str
+        optional list of extra files to copy (default is None)
+    remove_existing : bool
+        boolean indicating if existing file in dst should be removed (default
+        is True)
+
+    Returns
+    -------
+    mf6inp : list
+        list of MODFLOW 6 input files
+    mf6outp : list
+        list of MODFLOW 6 output files
+
+    """
+
+    # Create the destination folder
+    create_dir = False
+    if os.path.exists(dst):
+        if remove_existing:
+            print("Removing folder " + dst)
+            shutil.rmtree(dst)
+            create_dir = True
+    else:
+        create_dir = True
+    if create_dir:
+        os.makedirs(dst)
+
+    # Make list of files to copy
+    fname = os.path.join(src, mfnamefile)
+    fname = os.path.abspath(fname)
+    mf6inp, mf6outp = get_mf6_files(fname)
+    files2copy = [mfnamefile] + mf6inp
+
+    # determine if there are any .ex files
+    exinp = []
+    for f in mf6outp:
+        ext = os.path.splitext(f)[1]
+        if ext.lower() == ".hds":
+            pth = os.path.join(src, f + ".ex")
+            if os.path.isfile(pth):
+                exinp.append(f + ".ex")
+    if len(exinp) > 0:
+        files2copy += exinp
+    if extrafiles is not None:
+        files2copy += extrafiles
+
+    # Copy the files
+    for f in files2copy:
+        srcf = os.path.join(src, f)
+        dstf = os.path.join(dst, f)
+
+        # Check to see if dstf is going into a subfolder, and create that
+        # subfolder if it doesn't exist
+        sf = os.path.dirname(dstf)
+        if not os.path.isdir(sf):
+            try:
+                os.mkdir(sf)
+            except:
+                print("Could not make " + sf)
+
+        # Now copy the file
+        if os.path.exists(srcf):
+            print("Copy file '" + srcf + "' -> '" + dstf + "'")
+            shutil.copy(srcf, dstf)
+        else:
+            print(srcf + " does not exist")
+
+    return mf6inp, mf6outp
+
+
+def get_mf6_comparison(src):
+    """Determine comparison type for MODFLOW 6 simulation.
+
+    Parameters
+    ----------
+    src : str
+        directory path to search for comparison types
+
+    Returns
+    -------
+    action : str
+        comparison type
+
+    """
+    # Possible comparison - the order matters
+    optcomp = (
+        "compare",
+        ".cmp",
+        "mf2005",
+        "mf2005.cmp",
+        "mfnwt",
+        "mfnwt.cmp",
+        "mfusg",
+        "mfusg.cmp",
+        "mflgr",
+        "mflgr.cmp",
+        "libmf6",
+        "libmf6.cmp",
+        "mf6",
+        "mf6.cmp",
+    )
+    # Search the subdirectories of src for a comparison workspace
+    action = None
+    for _, dirs, _ in os.walk(src):
+        dl = [d.lower() for d in dirs]
+        for oc in optcomp:
+            if any(oc in s for s in dl):
+                action = oc
+                break
+    return action
+
+
+def setup_mf6_comparison(src, dst, remove_existing=True):
+    """Set up comparison for a MODFLOW 6 simulation.
+
+    Parameters
+    ----------
+    src : str
+        directory path with original MODFLOW 6 input files
+    dst : str
+        directory path that original MODFLOW 6 input files will be copied to
+    remove_existing : bool
+        boolean indicating if existing file in dst should be removed (default
+        is True)
+
+    Returns
+    -------
+    action : str
+        comparison type
+
+    """
+    # get the type of comparison to use (compare, mf2005, etc.)
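+    # For example (hypothetical layout), a test workspace like
+    #
+    #     my-test/
+    #         mfsim.nam, ...      MODFLOW 6 simulation under test
+    #         mf6/
+    #             mfsim.nam, ...  comparison simulation
+    #
+    # yields action == "mf6", and the comparison files are copied below dst.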
+    action = get_mf6_comparison(src)
+
+    if action is not None:
+        dst = os.path.join(dst, f"{action}")
+        if not os.path.isdir(dst):
+            try:
+                os.mkdir(dst)
+            except:
+                print("Could not make " + dst)
+        # clean directory
+        else:
+            print(f"cleaning...{dst}")
+            for root, dirs, files in os.walk(dst):
+                for f in files:
+                    tpth = os.path.join(root, f)
+                    print(f"  removing...{tpth}")
+                    os.remove(tpth)
+                for d in dirs:
+                    tdir = os.path.join(root, d)
+                    print(f"  removing...{tdir}")
+                    shutil.rmtree(tdir)
+        # copy files
+        cmppth = os.path.join(src, action)
+        files = os.listdir(cmppth)
+        files2copy = []
+        if action.lower() == "compare" or action.lower() == ".cmp":
+            for file in files:
+                if ".cmp" in os.path.splitext(file)[1].lower():
+                    files2copy.append(os.path.join(cmppth, file))
+            for srcf in files2copy:
+                f = os.path.basename(srcf)
+                dstf = os.path.join(dst, f)
+                # Now copy the file
+                if os.path.exists(srcf):
+                    print("Copy file '" + srcf + "' -> '" + dstf + "'")
+                    shutil.copy(srcf, dstf)
+                else:
+                    print(srcf + " does not exist")
+        else:
+            if "mf6" in action.lower():
+                for file in files:
+                    if "mfsim.nam" in file.lower():
+                        srcf = os.path.join(cmppth, os.path.basename(file))
+                        files2copy.append(srcf)
+                        srcdir = os.path.join(src, action)
+                        setup_mf6(srcdir, dst, remove_existing=remove_existing)
+                        break
+            else:
+                for file in files:
+                    if ".nam" in os.path.splitext(file)[1].lower():
+                        srcf = os.path.join(cmppth, os.path.basename(file))
+                        files2copy.append(srcf)
+                        nf = os.path.join(src, action, os.path.basename(file))
+                        model_setup(nf, dst, remove_existing=remove_existing)
+                        break
+
+    return action
+
+
+def get_mf6_nper(tdisfile):
+    """Return the number of stress periods in the MODFLOW 6 model.
+
+    Parameters
+    ----------
+    tdisfile : str
+        path to the TDIS file
+
+    Returns
+    -------
+    nper : int
+        number of stress periods in the simulation
+
+    """
+    with open(tdisfile, "r") as f:
+        lines = f.readlines()
+    line = [line for line in lines if "NPER" in line.upper()][0]
+    # the second token on the NPER line is the stress period count
+    nper = int(line.strip().split()[1])
+    return nper
+
+
+def get_mf6_mshape(disfile):
+    """Return the shape of the MODFLOW 6 model.
+
+    Parameters
+    ----------
+    disfile : str
+        path to a MODFLOW 6 discretization file
+
+    Returns
+    -------
+    mshape : tuple
+        tuple with the shape of the MODFLOW 6 model.
+
+    """
+    with open(disfile, "r") as f:
+        lines = f.readlines()
+
+    d = {}
+    for line in lines:
+
+        # Skip over blank and commented lines
+        ll = line.strip().split()
+        if len(ll) < 2:
+            continue
+        if line.strip()[0] in ["#", "!"]:
+            continue
+
+        for key in ["NODES", "NCPL", "NLAY", "NROW", "NCOL"]:
+            if ll[0].upper() in key:
+                d[key] = int(ll[1])
+
+    if "NODES" in d:
+        mshape = (d["NODES"],)
+    elif "NCPL" in d:
+        mshape = (d["NLAY"], d["NCPL"])
+    elif "NLAY" in d:
+        mshape = (d["NLAY"], d["NROW"], d["NCOL"])
+    else:
+        print(d)
+        raise Exception("Could not determine model shape")
+    return mshape
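+
+# For reference (hypothetical input), a structured DIS file with a
+# DIMENSIONS block such as:
+#
+#     BEGIN DIMENSIONS
+#       NLAY 3
+#       NROW 10
+#       NCOL 20
+#     END DIMENSIONS
+#
+# makes get_mf6_mshape return (3, 10, 20).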
+
+
+def get_mf6_files(mfnamefile):
+    """Return a list of all the MODFLOW 6 input and output files in this
+    model.
+
+    Parameters
+    ----------
+    mfnamefile : str
+        path to the MODFLOW 6 simulation name file
+
+    Returns
+    -------
+    filelist : list
+        list of MODFLOW 6 input files in a simulation
+    outplist : list
+        list of MODFLOW 6 output files in a simulation
+
+    """
+
+    srcdir = os.path.dirname(mfnamefile)
+    filelist = []
+    outplist = []
+
+    filekeys = ["TDIS6", "GWF6", "GWT", "GWF6-GWF6", "GWF-GWT", "IMS6"]
+    namefilekeys = ["GWF6", "GWT"]
+    namefiles = []
+
+    with open(mfnamefile) as f:
+
+        # Read line and skip comments
+        lines = f.readlines()
+
+    for line in lines:
+
+        # Skip over blank and commented lines
+        ll = line.strip().split()
+        if len(ll) < 2:
+            continue
+        if line.strip()[0] in ["#", "!"]:
+            continue
+
+        for key in filekeys:
+            if key in ll[0].upper():
+                fname = ll[1]
+                filelist.append(fname)
+
+        for key in namefilekeys:
+            if key in ll[0].upper():
+                fname = ll[1]
+                namefiles.append(fname)
+
+    # Go through name files and get files
+    for namefile in namefiles:
+        fname = os.path.join(srcdir, namefile)
+        with open(fname, "r") as f:
+            lines = f.readlines()
+        insideblock = False
+
+        for line in lines:
+            ll = line.upper().strip().split()
+            if len(ll) < 2:
+                continue
+            if ll[0] == "BEGIN" and ll[1] == "PACKAGES":
+                insideblock = True
+                continue
+            if ll[0] == "END" and ll[1] == "PACKAGES":
+                insideblock = False
+
+            if insideblock:
+                ll = line.strip().split()
+                if len(ll) < 2:
+                    continue
+                if line.strip()[0] in ["#", "!"]:
+                    continue
+                filelist.append(ll[1])
+
+    # Recursively go through every file and look for other files to copy,
+    # such as 'OPEN/CLOSE' and 'TIMESERIESFILE'.  If found, then
+    # add that file to the list of files to copy.
+    flist = filelist
+    while True:
+        olist = []
+        flist, olist = _get_mf6_external_files(srcdir, olist, flist)
+        # add to filelist
+        if len(flist) > 0:
+            filelist = filelist + flist
+        # add to outplist
+        if len(olist) > 0:
+            outplist = outplist + olist
+        # terminate loop if no additional files
+        if len(flist) < 1:
+            break
+
+    return filelist, outplist
+
+
+def _get_mf6_external_files(srcdir, outplist, files):
+    """Get list of external files in a MODFLOW 6 simulation.
+ + Parameters + ---------- + srcdir : str + path to a directory containing a MODFLOW 6 simulation + outplist : list + list of output files in a MODFLOW 6 simulation + files : list + list of MODFLOW 6 name files + + Returns + ------- + + """ + extfiles = [] + + for fname in files: + fname = os.path.join(srcdir, fname) + try: + f = open(fname, "r") + for line in f: + + # Skip invalid lines + ll = line.strip().split() + if len(ll) < 2: + continue + if line.strip()[0] in ["#", "!"]: + continue + + if "OPEN/CLOSE" in line.upper(): + for i, s in enumerate(ll): + if s.upper() == "OPEN/CLOSE": + stmp = ll[i + 1] + stmp = stmp.replace('"', "") + stmp = stmp.replace("'", "") + extfiles.append(stmp) + break + + if "TS6" in line.upper(): + for i, s in enumerate(ll): + if s.upper() == "FILEIN": + stmp = ll[i + 1] + stmp = stmp.replace('"', "") + stmp = stmp.replace("'", "") + extfiles.append(stmp) + break + + if "TAS6" in line.upper(): + for i, s in enumerate(ll): + if s.upper() == "FILEIN": + stmp = ll[i + 1] + stmp = stmp.replace('"', "") + stmp = stmp.replace("'", "") + extfiles.append(stmp) + break + + if "OBS6" in line.upper(): + for i, s in enumerate(ll): + if s.upper() == "FILEIN": + stmp = ll[i + 1] + stmp = stmp.replace('"', "") + stmp = stmp.replace("'", "") + extfiles.append(stmp) + break + + if "EXTERNAL" in line.upper(): + for i, s in enumerate(ll): + if s.upper() == "EXTERNAL": + stmp = ll[i + 1] + stmp = stmp.replace('"', "") + stmp = stmp.replace("'", "") + extfiles.append(stmp) + break + + if "FILE" in line.upper(): + for i, s in enumerate(ll): + if s.upper() == "FILEIN": + stmp = ll[i + 1] + stmp = stmp.replace('"', "") + stmp = stmp.replace("'", "") + extfiles.append(stmp) + break + + if "FILE" in line.upper(): + for i, s in enumerate(ll): + if s.upper() == "FILEOUT": + stmp = ll[i + 1] + stmp = stmp.replace('"', "") + stmp = stmp.replace("'", "") + outplist.append(stmp) + break + + except: + print("could not get a list of external mf6 files") + + return extfiles, outplist + + +def get_mf6_ftypes(namefile, ftypekeys): + """Return a list of FTYPES that are in the name file and in ftypekeys. + + Parameters + ---------- + namefile : str + path to a MODFLOW 6 name file + ftypekeys : list + list of desired FTYPEs + + Returns + ------- + ftypes : list + list of FTYPES that match ftypekeys in namefile + + """ + with open(namefile, "r") as f: + lines = f.readlines() + + ftypes = [] + for line in lines: + + # Skip over blank and commented lines + ll = line.strip().split() + if len(ll) < 2: + continue + if line.strip()[0] in ["#", "!"]: + continue + + for key in ftypekeys: + if ll[0].upper() in key: + ftypes.append(ll[0]) + + return ftypes + + +def get_mf6_blockdata(f, blockstr): + """Return list with all non comments between start and end of block + specified by blockstr. 
+ + Parameters + ---------- + f : file object + open file object + blockstr : str + name of block to search + + Returns + ------- + data : list + list of data in specified block + + """ + data = [] + + # find beginning of block + for line in f: + if line[0] != "#": + t = line.split() + if t[0].lower() == "begin" and t[1].lower() == blockstr.lower(): + break + for line in f: + if line[0] != "#": + t = line.split() + if t[0].lower() == "end" and t[1].lower() == blockstr.lower(): + break else: - regression.write_input() - - def build_mf6_models_legacy(self, build_function, idx, exdir): - """ - Build base and regression for older MODFLOW 6 models - - Parameters - ---------- - build_function : function - user defined function that builds a base model and optionally - builds a regression model. If a regression model is not built - then None must be returned from the function for the regression - model. - idx : int - counter that corresponds to exdir entry - exdir : str - path to regression model files - """ - base, regression = build_function(idx, exdir) - base.write_simulation() - if regression is not None: - regression.write_input() - - def run_mf6(self, sim): - """ - Run the MODFLOW 6 simulation and compare to existing head file or - appropriate MODFLOW-2005, MODFLOW-NWT, MODFLOW-USG, or MODFLOW-LGR run. - - Parameters - ---------- - sim : Simulation object - MODFLOW 6 autotest simulation object that runs the base and - regression models, compares the results, and tears down the - test if successful. - """ - print(os.getcwd()) - sim.set_model(sim.name, testModel=False) - sim.run() - sim.compare() - if sim.exfunc is not None: - sim.exfunc(sim) - sim.teardown() + data.append(line.rstrip()) + return data diff --git a/modflow_devtools/markers.py b/modflow_devtools/markers.py new file mode 100644 index 0000000..7099930 --- /dev/null +++ b/modflow_devtools/markers.py @@ -0,0 +1,69 @@ +from platform import system + +import pytest +from modflow_devtools.misc import ( + get_current_branch, + has_exe, + has_pkg, + is_connected, + is_in_ci, +) + + +def requires_exe(*exes): + missing = {exe for exe in exes if not has_exe(exe)} + return pytest.mark.skipif( + missing, + reason=f"missing executable{'s' if len(missing) != 1 else ''}: " + + ", ".join(missing), + ) + + +def requires_pkg(*pkgs): + missing = {pkg for pkg in pkgs if not has_pkg(pkg)} + return pytest.mark.skipif( + missing, + reason=f"missing package{'s' if len(missing) != 1 else ''}: " + + ", ".join(missing), + ) + + +def requires_platform(platform, ci_only=False): + return pytest.mark.skipif( + system().lower() != platform.lower() + and (is_in_ci() if ci_only else True), + reason=f"only compatible with platform: {platform.lower()}", + ) + + +def excludes_platform(platform, ci_only=False): + return pytest.mark.skipif( + system().lower() == platform.lower() + and (is_in_ci() if ci_only else True), + reason=f"not compatible with platform: {platform.lower()}", + ) + + +def requires_branch(branch): + current = get_current_branch() + return pytest.mark.skipif( + current != branch, reason=f"must run on branch: {branch}" + ) + + +def excludes_branch(branch): + current = get_current_branch() + return pytest.mark.skipif( + current == branch, reason=f"can't run on branch: {branch}" + ) + + +requires_github = pytest.mark.skipif( + not is_connected("github.com"), reason="github.com is required." 
+) + + +requires_spatial_reference = pytest.mark.skipif( + not is_connected("spatialreference.org"), + reason="spatialreference.org is required.", +) diff --git a/modflow_devtools/misc.py b/modflow_devtools/misc.py new file mode 100644 index 0000000..8be1c61 --- /dev/null +++ b/modflow_devtools/misc.py @@ -0,0 +1,269 @@ +import importlib +import socket +import sys +from contextlib import contextmanager +from os import PathLike, chdir, environ, getcwd +from os.path import basename, normpath +from pathlib import Path +from shutil import which +from subprocess import PIPE, Popen +from typing import List, Optional, Tuple +from urllib import request + +import pkg_resources +from _warnings import warn + + +@contextmanager +def set_dir(path: PathLike): + origin = Path(getcwd()).absolute() + wrkdir = Path(path).expanduser().absolute() + + try: + chdir(path) + print(f"Changed to working directory: {wrkdir} (previously: {origin})") + yield + finally: + chdir(origin) + print(f"Returned to previous directory: {origin}") + + +class add_sys_path: + """ + Context manager for temporarily editing the system path + (https://stackoverflow.com/a/39855753/6514033) + """ + + def __init__(self, path): + self.path = path + + def __enter__(self): + sys.path.insert(0, self.path) + + def __exit__(self, exc_type, exc_value, traceback): + try: + sys.path.remove(self.path) + except ValueError: + pass + + +def get_suffixes(ostag) -> Tuple[str, str]: + """Returns executable and library suffixes for the given OS (as returned by sys.platform)""" + + tag = ostag.lower() + + if tag in ["win32", "win64"]: + return ".exe", ".dll" + elif tag == "linux": + return "", ".so" + elif tag == "mac" or tag == "darwin": + return "", ".dylib" + else: + raise KeyError(f"unrecognized OS tag: {ostag!r}") + + +def run_cmd(*args, verbose=False, **kwargs): + """Run any command, return tuple (stdout, stderr, returncode).""" + args = [str(g) for g in args] + if verbose: + print("running: " + " ".join(args)) + p = Popen(args, stdout=PIPE, stderr=PIPE, **kwargs) + stdout, stderr = p.communicate() + stdout = stdout.decode() + stderr = stderr.decode() + returncode = p.returncode + if verbose: + print(f"stdout:\n{stdout}") + print(f"stderr:\n{stderr}") + print(f"returncode: {returncode}") + return stdout, stderr, returncode + + +def run_py_script(script, *args, verbose=False): + """Run a Python script, return tuple (stdout, stderr, returncode).""" + return run_cmd( + sys.executable, script, *args, verbose=verbose, cwd=Path(script).parent + ) + + +def get_current_branch() -> str: + # check if on GitHub Actions CI + ref = environ.get("GITHUB_REF") + if ref is not None: + return basename(normpath(ref)).lower() + + # otherwise ask git about it + if not which("git"): + raise RuntimeError("'git' required to determine current branch") + stdout, stderr, code = run_cmd("git", "rev-parse", "--abbrev-ref", "HEAD") + if code == 0 and stdout: + return stdout.strip().lower() + raise ValueError(f"Could not determine current branch: {stderr}") + + +def get_mf6_ftypes(namefile_path: PathLike, ftypekeys: List[str]) -> List[str]: + """ + Return a list of FTYPES that are in the name file and in ftypekeys. 
+ + Parameters + ---------- + namefile_path : str + path to a MODFLOW 6 name file + ftypekeys : list + list of desired FTYPEs + Returns + ------- + ftypes : list + list of FTYPES that match ftypekeys in namefile + """ + with open(namefile_path, "r") as f: + lines = f.readlines() + + ftypes = [] + for line in lines: + # Skip over blank and commented lines + ll = line.strip().split() + if len(ll) < 2: + continue + + if ll[0] in ["#", "!"]: + continue + + for key in ftypekeys: + if key.lower() in ll[0].lower(): + ftypes.append(ll[0]) + + return ftypes + + +def has_packages(namefile_path: PathLike, packages: List[str]) -> bool: + ftypes = [item.upper() for item in get_mf6_ftypes(namefile_path, packages)] + return len(ftypes) > 0 + + +def get_models( + path: PathLike, + prefix: str = None, + namefile: str = "mfsim.nam", + excluded=None, + selected=None, + packages=None, +) -> List[Path]: + """ + Find models in the given filesystem location. + """ + + # if path doesn't exist, return empty list + if not Path(path).is_dir(): + return [] + + # find namfiles + namfile_paths = [ + p + for p in Path(path).rglob( + f"{prefix}*/{namefile}" if prefix else namefile + ) + ] + + # remove excluded + namfile_paths = [ + p + for p in namfile_paths + if (not excluded or not any(e in str(p) for e in excluded)) + ] + + # filter by package (optional) + if packages: + namfile_paths = [ + p + for p in namfile_paths + if (has_packages(p, packages) if packages else True) + ] + + # get model dir paths + model_paths = [p.parent for p in namfile_paths] + + # filter by model name (optional) + if selected: + model_paths = [ + model + for model in model_paths + if any(s in model.name for s in selected) + ] + + # exclude dev examples on master or release branches + branch = get_current_branch() + if "master" in branch.lower() or "release" in branch.lower(): + model_paths = [ + model for model in model_paths if "_dev" not in model.name.lower() + ] + + return model_paths + + +def is_connected(hostname): + """See https://stackoverflow.com/a/20913928/ to test hostname.""" + try: + host = socket.gethostbyname(hostname) + s = socket.create_connection((host, 80), 2) + s.close() + return True + except Exception: + pass + return False + + +def is_in_ci(): + # if running in GitHub Actions CI, "CI" variable always set to true + # https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables + return bool(environ.get("CI", None)) + + +def is_github_rate_limited() -> Optional[bool]: + """ + Determines if a GitHub API rate limit is applied to the current IP. + Note that running this function will consume an API request! + + Returns + ------- + True if rate-limiting is applied, otherwise False (or None if the connection fails). + """ + try: + with request.urlopen( + "https://api.github.com/users/octocat" + ) as response: + remaining = int(response.headers["x-ratelimit-remaining"]) + if remaining < 10: + warn( + f"Only {remaining} GitHub API requests remaining before rate-limiting" + ) + return remaining > 0 + except: + return None + + +_has_exe_cache = {} +_has_pkg_cache = {} + + +def has_exe(exe): + if exe not in _has_exe_cache: + _has_exe_cache[exe] = bool(which(exe)) + return _has_exe_cache[exe] + + +def has_pkg(pkg): + if pkg not in _has_pkg_cache: + + # for some dependencies, package name and import name are different + # (e.g. 
pyshp/shapefile, mfpymake/pymake, python-dateutil/dateutil) + # pkg_resources expects package name, importlib expects import name + try: + _has_pkg_cache[pkg] = bool(importlib.import_module(pkg)) + except (ImportError, ModuleNotFoundError): + try: + _has_pkg_cache[pkg] = bool(pkg_resources.get_distribution(pkg)) + except pkg_resources.DistributionNotFound: + _has_pkg_cache[pkg] = False + + return _has_pkg_cache[pkg] diff --git a/modflow_devtools/simulation.py b/modflow_devtools/simulation.py deleted file mode 100644 index 8e90496..0000000 --- a/modflow_devtools/simulation.py +++ /dev/null @@ -1,798 +0,0 @@ -import os -import shutil -import sys -import time - -import flopy -import numpy as np - -from .framework import running_on_CI, set_teardown_test -from .mftest_context import MFTestContext -from .targets import get_target_dictionary -from .testing.testing import ( - compare_heads, - get_mf6_comparison, - get_mf6_files, - get_namefiles, - setup_mf6, - setup_mf6_comparison, -) - -sfmt = "{:25s} - {}" -extdict = { - "hds": "head", - "hed": "head", - "bhd": "head", - "ucn": "concentration", - "cbc": "cell-by-cell", -} - - -class Simulation(object): - def __init__( - self, - name, - exfunc=None, - exe_dict=None, - testbin=None, - htol=None, - pdtol=None, - rclose=None, - idxsim=None, - cmp_verbose=True, - require_failure=None, - api_func=None, - mf6_regression=False, - make_comparison=True, - ): - msg = sfmt.format("Initializing test", name) - print(msg) - - self.name = name - self.exfunc = exfunc - self.ctx = None - self.simpath = None - self.inpt = None - self.outp = None - self.coutp = None - self.api_func = api_func - self.mf6_regression = mf6_regression - self.make_comparison = make_comparison - self.action = None - - if testbin is not None: - self.ctx = MFTestContext(testbin=testbin) - self.target_dict = self.ctx.get_target_dictionary() - else: - self.target_dict = get_target_dictionary() - - if exe_dict is not None: - if not isinstance(exe_dict, dict): - msg = "exe_dict must be a dictionary" - assert False, msg - keys = list(self.target_dict.keys()) - for key, value in exe_dict.items(): - if key in keys: - exe0 = self.target_dict[key] - exe = os.path.join(os.path.dirname(exe0), value) - msg = ( - f"replacing {key} executable " - + f'"{self.target_dict[key]}" with ' - + f'"{exe}".' - ) - print(msg) - self.target_dict[key] = exe - - for idx, arg in enumerate(sys.argv): - if arg[2:].lower() in list(self.target_dict.keys()): - key = arg[2:].lower() - exe0 = self.target_dict[key] - exe = os.path.join(os.path.dirname(exe0), sys.argv[idx + 1]) - msg = ( - f"replacing {key} executable " - + f'"{self.target_dict[key]}" with ' - + f'"{exe}".' 
- ) - print(msg) - self.target_dict[key] = exe - - # set htol for comparisons - if htol is None: - htol = 0.001 - else: - msg = sfmt.format("User specified comparison htol", htol) - print(msg) - - self.htol = htol - - # set pdtol for comparisons - if pdtol is None: - pdtol = 0.001 - else: - msg = sfmt.format( - "User specified percent difference comparison pdtol", pdtol - ) - print(msg) - - self.pdtol = pdtol - - # set rclose for comparisons - if rclose is None: - rclose = 0.001 - else: - msg = sfmt.format( - "User specified percent difference comparison rclose", rclose - ) - print(msg) - - self.rclose = rclose - - # set index for multi-simulation comparisons - self.idxsim = idxsim - - # set compare verbosity - self.cmp_verbose = cmp_verbose - - # set allow failure - self.require_failure = require_failure - - self.teardown_test = set_teardown_test() - self.success = False - - # set is_ci - self.is_CI = running_on_CI() - - return - - def __repr__(self): - return self.name - - def set_model(self, pth, testModel=True): - """ - Set paths to MODFLOW 6 model and associated comparison test - """ - # make sure this is a valid path - if not os.path.isdir(pth): - assert False, f"{pth} is not a valid directory" - - self.simpath = pth - - # get MODFLOW 6 output file names - fpth = os.path.join(pth, "mfsim.nam") - mf6inp, mf6outp = get_mf6_files(fpth) - self.outp = mf6outp - - # determine comparison model - self.setup_comparison(pth, pth, testModel=testModel) - if self.action is not None: - if "mf6" in self.action or "mf6-regression" in self.action: - cinp, self.coutp = get_mf6_files(fpth) - - def setup(self, src, dst): - msg = sfmt.format("Setup test", self.name) - print(msg) - self.originpath = src - self.simpath = dst - # write message - print("running setup_mf6 from " + f"{os.path.abspath(os.getcwd())}") - try: - self.inpt, self.outp = setup_mf6(src=src, dst=dst) - print("waiting...") - time.sleep(0.5) - success = True - except: - success = False - print(f"source: {src}") - print(f"destination: {dst}") - assert success, "did not run setup_mf6" - - if success: - self.setup_comparison(src, dst) - - return - - def setup_comparison(self, src, dst, testModel=True): - - # adjust htol if it is smaller than IMS outer_dvclose - dvclose = self._get_dvclose(dst) - if dvclose is not None: - dvclose *= 5.0 - if self.htol < dvclose: - self.htol = dvclose - - # get rclose to use with budget comparisons - rclose = self._get_rclose(dst) - if rclose is None: - rclose = 0.5 - else: - rclose *= 5.0 - self.rclose = rclose - - # Copy comparison simulations if available - if self.mf6_regression: - action = "mf6-regression" - pth = os.path.join(dst, action) - if os.path.isdir(pth): - shutil.rmtree(pth) - shutil.copytree(dst, pth) - elif testModel: - action = setup_mf6_comparison( - src, dst, remove_existing=self.teardown_test - ) - else: - action = get_mf6_comparison(dst) - - self.action = action - - return - - def run(self): - """ - Run the model and assert if the model terminated successfully - """ - msg = sfmt.format("Run test", self.name) - print(msg) - - # Set nam as namefile name without path - nam = None - - # run mf6 models - target, ext = os.path.splitext(self.target_dict["mf6"]) - exe = os.path.abspath(target) - msg = sfmt.format("using executable", exe) - print(msg) - try: - success, buff = flopy.run_model( - exe, - nam, - model_ws=self.simpath, - silent=False, - report=True, - ) - msg = sfmt.format("MODFLOW 6 run", self.name) - if success: - print(msg) - else: - print(msg) - except: - msg = 
sfmt.format("MODFLOW 6 run", self.name) - print(msg) - success = False - - # set failure based on success and require_failure setting - if self.require_failure is None: - msg = "MODFLOW 6 model did not terminate normally" - if success: - failure = False - else: - failure = True - else: - if self.require_failure: - msg = "MODFLOW 6 model should have failed" - if not success: - failure = False - else: - failure = True - else: - msg = "MODFLOW 6 model should not have failed" - if success: - failure = False - else: - failure = True - - # print end of mfsim.lst to the screen - if failure and self.is_CI: - fpth = os.path.join(self.simpath, "mfsim.lst") - msg = self._get_mfsim_listing(fpth) + msg - - # test for failure - assert not failure, msg - - self.nam_cmp = None - if success: - if self.action is not None: - if self.action.lower() == "compare": - msg = sfmt.format("Comparison files", self.name) - print(msg) - else: - cpth = os.path.join(self.simpath, self.action) - key = self.action.lower().replace(".cmp", "") - exe = os.path.abspath(self.target_dict[key]) - msg = sfmt.format("comparison executable", exe) - print(msg) - if ( - "mf6" in key - or "libmf6" in key - or "mf6-regression" in key - ): - nam = None - else: - npth = get_namefiles(cpth)[0] - nam = os.path.basename(npth) - self.nam_cmp = nam - try: - if self.api_func is None: - success_cmp, buff = flopy.run_model( - exe, - nam, - model_ws=cpth, - silent=False, - report=True, - ) - else: - success_cmp, buff = self.api_func( - exe, self.idxsim, model_ws=cpth - ) - msg = sfmt.format( - "Comparison run", self.name + "/" + key - ) - print(msg) - - # print end of mfsim.lst to the screen - if "mf6" in key: - if not success and self.is_CI: - fpth = os.path.join(cpth, "mfsim.lst") - print(self._get_mfsim_listing(fpth)) - - except: - success_cmp = False - msg = sfmt.format( - "Comparison run", self.name + "/" + key - ) - print(msg) - - assert success_cmp, "Unsuccessful comparison run" - - return - - def compare(self): - """ - Compare the model results - - """ - self.success = True - - # evaluate if comparison should be made - if not self.make_comparison: - return - - msgall = "" - msg = sfmt.format("Comparison test", self.name) - print(msg) - - if self.action is not None: - cpth = os.path.join(self.simpath, self.action) - files_cmp = None - if self.action.lower() == "compare": - files_cmp = [] - files = os.listdir(cpth) - for file in files: - files_cmp.append(file) - elif "mf6" in self.action: - fpth = os.path.join(cpth, "mfsim.nam") - cinp, self.coutp = get_mf6_files(fpth) - - head_extensions = ( - "hds", - "hed", - "bhd", - "ahd", - "bin", - ) - if "mf6-regression" in self.action: - success, msgall = self._compare_heads( - msgall, - extensions=head_extensions, - ) - if not success: - self.success = False - # non-regression runs - for new features - else: - files1 = [] - files2 = [] - exfiles = [] - ipos = 0 - for file1 in self.outp: - ext = os.path.splitext(file1)[1][1:] - - if ext.lower() in head_extensions: - - # simulation file - pth = os.path.join(self.simpath, file1) - files1.append(pth) - - # look for an exclusion file - pth = os.path.join(self.simpath, file1 + ".ex") - if os.path.isfile(pth): - exfiles.append(pth) - else: - exfiles.append(None) - - # Check to see if there is a corresponding compare file - if files_cmp is not None: - - if file1 + ".cmp" in files_cmp: - # compare file - idx = files_cmp.index(file1 + ".cmp") - pth = os.path.join(cpth, files_cmp[idx]) - files2.append(pth) - txt = sfmt.format( - f"Comparison file {ipos + 1}", 
- os.path.basename(pth), - ) - print(txt) - else: - if self.coutp is not None: - for file2 in self.coutp: - ext = os.path.splitext(file2)[1][1:] - - if ext.lower() in head_extensions: - # simulation file - pth = os.path.join(cpth, file2) - files2.append(pth) - - else: - files2.append(None) - - if self.nam_cmp is None: - pth = None - else: - pth = os.path.join(cpth, self.nam_cmp) - - for ipos in range(len(files1)): - file1 = files1[ipos] - ext = os.path.splitext(file1)[1][1:].lower() - outfile = os.path.splitext(os.path.basename(file1))[0] - outfile = os.path.join( - self.simpath, outfile + "." + ext + ".cmp.out" - ) - if files2 is None: - file2 = None - else: - file2 = files2[ipos] - - # set exfile - exfile = None - if file2 is None: - if len(exfiles) > 0: - exfile = exfiles[ipos] - if exfile is not None: - txt = sfmt.format( - f"Exclusion file {ipos + 1}", - os.path.basename(exfile), - ) - print(txt) - - # make comparison - success_tst = compare_heads( - None, - pth, - precision="double", - text=extdict[ext], - outfile=outfile, - files1=file1, - files2=file2, - htol=self.htol, - difftol=True, - # Change to true to have list of all nodes exceeding htol - verbose=self.cmp_verbose, - exfile=exfile, - ) - msg = sfmt.format( - f"{extdict[ext]} comparison {ipos + 1}", - self.name, - ) - print(msg) - - if not success_tst: - self.success = False - msgall += msg + " ... FAILED\n" - - # compare concentrations - if "mf6-regression" in self.action: - success, msgall = self._compare_concentrations(msgall) - if not success: - self.success = False - - # compare cbc files - if "mf6-regression" in self.action: - cbc_extensions = ( - "cbc", - "bud", - ) - success, msgall = self._compare_budgets( - msgall, extensions=cbc_extensions - ) - if not success: - self.success = False - - assert self.success, msgall - return - - def teardown(self): - """ - Remove the example folder - - """ - if self.success: - if self.teardown_test: - msg = sfmt.format("Teardown test", self.name) - print(msg) - - # wait to delete on windows - if sys.platform.lower() == "win32": - time.sleep(3) - - try: - shutil.rmtree(self.simpath) - success = True - except: - print("Could not remove test " + self.name) - success = False - assert success - else: - print("Retaining test files") - return - - def Ctx(self): - return self.ctx - - def _get_mfsim_listing(self, lst_pth): - """Get the tail of the mfsim.lst listing file""" - msg = "" - ilen = 100 - with open(lst_pth) as fp: - lines = fp.read().splitlines() - msg = "\n" + 79 * "-" + "\n" - if len(lines) > ilen: - i0 = -100 - else: - i0 = 0 - for line in lines[i0:]: - if len(line) > 0: - msg += f"{line}\n" - msg += 79 * "-" + "\n\n" - return msg - - def _get_dvclose(self, dir_pth): - """Get outer_dvclose value from MODFLOW 6 ims file""" - dvclose = None - files = os.listdir(dir_pth) - for file_name in files: - pth = os.path.join(dir_pth, file_name) - if os.path.isfile(pth): - if file_name.lower().endswith(".ims"): - with open(pth) as f: - lines = f.read().splitlines() - for line in lines: - if "outer_dvclose" in line.lower(): - v = float(line.split()[1]) - if dvclose is None: - dvclose = v - else: - if v > dvclose: - dvclose = v - break - - return dvclose - - def _get_rclose(self, dir_pth): - """Get inner_rclose value from MODFLOW 6 ims file""" - rclose = None - files = os.listdir(dir_pth) - for file_name in files: - pth = os.path.join(dir_pth, file_name) - if os.path.isfile(pth): - if file_name.lower().endswith(".ims"): - with open(pth) as f: - lines = f.read().splitlines() - for line in 
lines: - if "inner_rclose" in line.lower(): - v = float(line.split()[1]) - if rclose is None: - rclose = v - else: - if v > rclose: - rclose = v - break - - return rclose - - def _regression_files(self, extensions): - if isinstance(extensions, str): - extensions = [extensions] - files = os.listdir(self.simpath) - files0 = [] - files1 = [] - for file_name in files: - fpth0 = os.path.join(self.simpath, file_name) - if os.path.isfile(fpth0): - for extension in extensions: - if file_name.lower().endswith(extension): - files0.append(fpth0) - fpth1 = os.path.join( - self.simpath, "mf6-regression", file_name - ) - files1.append(fpth1) - break - return files0, files1 - - def _compare_heads(self, msgall, extensions="hds"): - if isinstance(extensions, str): - extensions = [extensions] - success = True - files0, files1 = self._regression_files(extensions) - extension = "hds" - ipos = 0 - for idx, (fpth0, fpth1) in enumerate(zip(files0, files1)): - outfile = os.path.splitext(os.path.basename(fpth0))[0] - outfile = os.path.join( - self.simpath, outfile + f".{extension}.cmp.out" - ) - success_tst = compare_heads( - None, - None, - precision="double", - htol=self.htol, - text=extdict[extension], - outfile=outfile, - files1=fpth0, - files2=fpth1, - verbose=self.cmp_verbose, - ) - msg = sfmt.format( - f"{extdict[extension]} comparison {ipos + 1}", - f"{self.name} ({os.path.basename(fpth0)})", - ) - ipos += 1 - print(msg) - - if not success_tst: - success = False - msgall += msg + " ... FAILED\n" - - return success, msgall - - def _compare_concentrations(self, msgall, extensions="ucn"): - if isinstance(extensions, str): - extensions = [extensions] - success = True - files0, files1 = self._regression_files(extensions) - extension = "ucn" - ipos = 0 - for idx, (fpth0, fpth1) in enumerate(zip(files0, files1)): - outfile = os.path.splitext(os.path.basename(fpth0))[0] - outfile = os.path.join( - self.simpath, outfile + f".{extension}.cmp.out" - ) - success_tst = compare_heads( - None, - None, - precision="double", - htol=self.htol, - text=extdict[extension], - outfile=outfile, - files1=fpth0, - files2=fpth1, - verbose=self.cmp_verbose, - ) - msg = sfmt.format( - f"{extdict[extension]} comparison {ipos + 1}", - f"{self.name} ({os.path.basename(fpth0)})", - ) - ipos += 1 - print(msg) - - if not success_tst: - success = False - msgall += msg + " ... 
FAILED\n" - - return success, msgall - - def _compare_budgets(self, msgall, extensions="cbc"): - if isinstance(extensions, str): - extensions = [extensions] - success = True - files0, files1 = self._regression_files(extensions) - extension = "cbc" - ipos = 0 - for idx, (fpth0, fpth1) in enumerate(zip(files0, files1)): - if os.stat(fpth0).st_size * os.stat(fpth0).st_size == 0: - continue - outfile = os.path.splitext(os.path.basename(fpth0))[0] - outfile = os.path.join( - self.simpath, outfile + f".{extension}.cmp.out" - ) - fcmp = open(outfile, "w") - - # open the files - cbc0 = flopy.utils.CellBudgetFile( - fpth0, precision="double", verbose=self.cmp_verbose - ) - cbc1 = flopy.utils.CellBudgetFile( - fpth1, precision="double", verbose=self.cmp_verbose - ) - - # build list of cbc data to retrieve - avail0 = cbc0.get_unique_record_names() - avail1 = cbc1.get_unique_record_names() - avail0 = [t.decode().strip() for t in avail0] - avail1 = [t.decode().strip() for t in avail1] - - # initialize list for storing totals for each budget term terms - cbc_keys0 = [] - cbc_keys1 = [] - for t in avail0: - t1 = t - if t not in avail1: - # check if RCHA or EVTA is available and use that instead - # should be able to remove this once v6.3.0 is released - if t[:-1] in avail1: - t1 = t[:-1] - else: - raise Exception(f"Could not find {t} in {fpth1}") - cbc_keys0.append(t) - cbc_keys1.append(t1) - - # get list of times and kstpkper - kk = cbc0.get_kstpkper() - times = cbc0.get_times() - - # process data - success_tst = True - for key, key1 in zip(cbc_keys0, cbc_keys1): - for idx, (k, t) in enumerate(zip(kk, times)): - v0 = cbc0.get_data(kstpkper=k, text=key)[0] - v1 = cbc1.get_data(kstpkper=k, text=key1)[0] - if v0.dtype.names is not None: - v0 = v0["q"] - v1 = v1["q"] - # skip empty vectors - if v0.size < 1: - continue - vmin = self.rclose - if vmin < 1e-6: - vmin = 1e-6 - vmin_tol = 5.0 * vmin - idx = (abs(v0) > vmin) & (abs(v1) > vmin) - diff = np.zeros(v0.shape, dtype=v0.dtype) - diff[idx] = abs(v0[idx] - v1[idx]) - diffmax = diff.max() - indices = np.where(diff == diffmax)[0] - if diffmax > vmin_tol: - success_tst = False - msg = ( - f"{os.path.basename(fpth0)} - " - + f"{key:16s} " - + f"difference ({diffmax:10.4g}) " - + f"> {self.pdtol:10.4g} " - + f"at {indices.size} nodes " - + f" [first location ({indices[0] + 1})] " - + f"at time {t} " - ) - fcmp.write(f"{msg}\n") - if self.cmp_verbose: - print(msg) - - msg = sfmt.format( - f"{extdict[extension]} comparison {ipos + 1}", - f"{self.name} ({os.path.basename(fpth0)})", - ) - ipos += 1 - print(msg) - - fcmp.close() - - if not success_tst: - success = False - msgall += msg + " ... 
FAILED\n" - - return success, msgall - - -def api_return(success, model_ws): - """ - parse libmf6 stdout shared object file - """ - fpth = os.path.join(model_ws, "mfsim.stdout") - return success, open(fpth).readlines() diff --git a/modflow_devtools/targets.py b/modflow_devtools/targets.py deleted file mode 100644 index 2c08c66..0000000 --- a/modflow_devtools/targets.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -import subprocess -import sys - -import flopy - -# paths to executables for previous versions of MODFLOW -downloaded_bindir = os.path.join( - os.path.dirname(__file__), "..", "bin", "downloaded" -) -rebuilt_bindir = os.path.join( - os.path.dirname(__file__), "..", "bin", "rebuilt" -) - -# paths to MODFLOW 6 executable, source files, and example files -bindir = os.path.join("..", "bin") - - -def target_pth(target, pth): - exe_exists = flopy.which(target, path=pth) - # if target does not exist in specified path determine if it - # exists anywhere in the path - if exe_exists is None: - exe_exists = flopy.which(target) - if exe_exists is None: - exe_exists = os.path.abspath(os.path.join(pth, target)) - raise Exception(f"{exe_exists} does not exist or is not executable.") - return os.path.abspath(exe_exists) - - -def get_target_dictionary(): - target_ext = "" - target_so = ".so" - sysinfo = sys.platform.lower() - if sysinfo.lower() == "win32": - target_ext = ".exe" - target_so = ".dll" - elif sysinfo.lower() == "darwin": - target_so = ".dylib" - - # create dictionary of valid executable targets for regression tests - target_dict = {} - - target = target_pth(f"mf2005dbl{target_ext}", downloaded_bindir) - target_dict["mf2005"] = target - target = target_pth(f"mfnwtdbl{target_ext}", downloaded_bindir) - target_dict["mfnwt"] = target - target = target_pth(f"mfusgdbl{target_ext}", downloaded_bindir) - target_dict["mfusg"] = target - target = target_pth(f"mflgrdbl{target_ext}", downloaded_bindir) - target_dict["mflgr"] = target - target = target_pth(f"mf2005{target_ext}", downloaded_bindir) - target_dict["mf2005s"] = target - target = target_pth(f"mt3dms{target_ext}", downloaded_bindir) - target_dict["mt3dms"] = target - target = target_pth(f"mf6{target_ext}", rebuilt_bindir) - target_dict["mf6-regression"] = target - - # create MODFLOW 6 target name and add to dictionary - program = f"mf6{target_ext}" - target = os.path.join(bindir, program) - target_dict["mf6"] = target - - # create MODFLOW 6 so/dll target name - tprog = f"libmf6{target_so}" - ttarg = os.path.join(bindir, tprog) - target_dict["libmf6"] = ttarg - - # add MODFLOW 5 to 6 converter to dictionary of valid executable targets - tprog = f"mf5to6{target_ext}" - ttarg = os.path.join(bindir, tprog) - target_dict["mf5to6"] = ttarg - - # add Zonebudget for 6 to dictionary of valid executable targets - tprog = f"zbud6{target_ext}" - ttarg = os.path.join(bindir, tprog) - target_dict["zbud6"] = ttarg - - return target_dict - - -def run_exe(argv, ws="."): - buff = [] - proc = subprocess.Popen( - argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=ws - ) - result, error = proc.communicate() - if result is not None: - c = result.decode("utf-8") - c = c.rstrip("\r\n") - print(f"{c}") - buff.append(c) - - return proc.returncode, buff - - -def get_mf6_version(version="mf6"): - """Function to get MODFLOW 6 version number""" - exe = get_target_dictionary()[version] - return_code, buff = run_exe((exe, "-v")) - if return_code == 0: - version = buff[0].split()[1] - else: - version = None - return version diff --git 
a/modflow_devtools/test/__init__.py b/modflow_devtools/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/modflow_devtools/test/test_case.py b/modflow_devtools/test/test_case.py
new file mode 100644
index 0000000..9d4c514
--- /dev/null
+++ b/modflow_devtools/test/test_case.py
@@ -0,0 +1,33 @@
+import pytest
+from modflow_devtools.case import Case
+
+
+def test_requires_name():
+    with pytest.raises(ValueError):
+        Case()
+
+
+def test_defaults():
+    assert not Case(name="test").xfail
+
+
+def test_copy():
+    case = Case(name="test", foo="bar")
+    copy = case.copy()
+
+    assert case is not copy
+    assert case == copy
+
+
+def test_copy_update():
+    case = Case(name="test", foo="bar")
+    copy = case.copy_update()
+
+    assert case is not copy
+    assert case == copy
+
+    copy2 = case.copy_update(foo="baz")
+
+    assert copy is not copy2
+    assert copy.foo == "bar"
+    assert copy2.foo == "baz"
diff --git a/modflow_devtools/test/test_context.py b/modflow_devtools/test/test_context.py
new file mode 100644
index 0000000..e69de29
diff --git a/modflow_devtools/test/test_download.py b/modflow_devtools/test/test_download.py
new file mode 100644
index 0000000..e191986
--- /dev/null
+++ b/modflow_devtools/test/test_download.py
@@ -0,0 +1,22 @@
+import pytest
+from modflow_devtools.download import download_and_unzip
+from modflow_devtools.markers import requires_github
+
+
+@requires_github
+@pytest.mark.parametrize("delete_zip", [True, False])
+def test_download_and_unzip(function_tmpdir, delete_zip):
+    zip_name = "mf6.3.0_linux.zip"
+    dir_name = zip_name.replace(".zip", "")
+    url = f"https://github.com/MODFLOW-USGS/modflow6/releases/download/6.3.0/{zip_name}"
+    download_and_unzip(
+        url, function_tmpdir, delete_zip=delete_zip, verbose=True
+    )
+
+    assert (function_tmpdir / zip_name).is_file() != delete_zip
+
+    dir_path = function_tmpdir / dir_name
+    assert dir_path.is_dir()
+
+    contents = list(dir_path.rglob("*"))
+    assert len(contents) > 0
diff --git a/modflow_devtools/test/test_executables.py b/modflow_devtools/test/test_executables.py
new file mode 100644
index 0000000..86cf0ae
--- /dev/null
+++ b/modflow_devtools/test/test_executables.py
@@ -0,0 +1,50 @@
+import subprocess
+import sys
+from os import environ
+from pathlib import Path
+
+import pytest
+from modflow_devtools.executables import Executables
+from modflow_devtools.misc import add_sys_path, get_suffixes
+
+# fall back to the default executables location if BIN_PATH is unset, so
+# that collection does not fail with Path(None)
+_bin_path = Path(environ.get("BIN_PATH", "~/.local/bin/modflow")).expanduser()
+_ext, _ = get_suffixes(sys.platform)
+
+
+@pytest.fixture
+def bin_path(module_tmpdir) -> Path:
+    return _bin_path.absolute()
+
+
+@pytest.mark.skipif(not _bin_path.is_dir(), reason="bin directory not found")
+def test_get_path(bin_path):
+    with add_sys_path(str(_bin_path)):
+        ext, _ = get_suffixes(sys.platform)
+        assert (
+            Executables.get_path("mf6", path=_bin_path)
+            == _bin_path / f"mf6{ext}"
+        )
+
+
+@pytest.mark.skipif(not _bin_path.is_dir(), reason="bin directory not found")
+def test_get_version(bin_path):
+    with add_sys_path(str(bin_path)):
+        ver_str = Executables.get_version("mf6", path=bin_path).partition(" ")
+        print(ver_str)
+        version = int(ver_str[0].split(".")[0])
+        assert version >= 6
+
+
+@pytest.fixture
+def exes(bin_path):
+    return Executables(mf6=bin_path / f"mf6{_ext}")
+
+
+def test_executables_mapping(bin_path, exes):
+    print(exes.mf6)
+    assert exes.mf6 == bin_path / f"mf6{_ext}"
+
+
+def test_executables_usage(exes):
+    output = subprocess.check_output([f"{exes.mf6}", "-v"]).decode("utf-8")
+    print(output)
+    assert "mf6" in output
diff --git a/modflow_devtools/test/test_fixtures.py b/modflow_devtools/test/test_fixtures.py new
file mode 100644 index 0000000..b85da14 --- /dev/null +++ b/modflow_devtools/test/test_fixtures.py @@ -0,0 +1,262 @@ +import inspect +import platform +from pathlib import Path + +import pytest +from _pytest.config import ExitCode + +system = platform.system() +proj_root = Path(__file__).parent.parent.parent.parent + + +# test temporary directory fixtures + + +def test_tmpdirs(function_tmpdir, module_tmpdir): + # function-scoped temporary directory + assert isinstance(function_tmpdir, Path) + assert function_tmpdir.is_dir() + assert inspect.currentframe().f_code.co_name in function_tmpdir.stem + + # module-scoped temp dir (accessible to other tests in the script) + assert module_tmpdir.is_dir() + assert "test" in module_tmpdir.stem + + +def test_function_scoped_tmpdir(function_tmpdir): + assert isinstance(function_tmpdir, Path) + assert function_tmpdir.is_dir() + assert inspect.currentframe().f_code.co_name in function_tmpdir.stem + + +@pytest.mark.parametrize("name", ["noslash", "forward/slash", "back\\slash"]) +def test_function_scoped_tmpdir_slash_in_name(function_tmpdir, name): + assert isinstance(function_tmpdir, Path) + assert function_tmpdir.is_dir() + + # node name might have slashes if test function is parametrized + # (e.g., test_function_scoped_tmpdir_slash_in_name[a/slash]) + replaced1 = name.replace("/", "_").replace("\\", "_").replace(":", "_") + replaced2 = name.replace("/", "_").replace("\\", "__").replace(":", "_") + assert ( + f"{inspect.currentframe().f_code.co_name}[{replaced1}]" + in function_tmpdir.stem + or f"{inspect.currentframe().f_code.co_name}[{replaced2}]" + in function_tmpdir.stem + ) + + +class TestClassScopedTmpdir: + filename = "hello.txt" + + @pytest.fixture(autouse=True) + def setup(self, class_tmpdir): + with open(class_tmpdir / self.filename, "w") as file: + file.write("hello, class-scoped tmpdir") + + def test_class_scoped_tmpdir(self, class_tmpdir): + assert isinstance(class_tmpdir, Path) + assert class_tmpdir.is_dir() + assert self.__class__.__name__ in class_tmpdir.stem + assert Path(class_tmpdir / self.filename).is_file() + + +def test_module_scoped_tmpdir(module_tmpdir): + assert isinstance(module_tmpdir, Path) + assert module_tmpdir.is_dir() + assert Path(inspect.getmodulename(__file__)).stem in module_tmpdir.name + + +def test_session_scoped_tmpdir(session_tmpdir): + assert isinstance(session_tmpdir, Path) + assert session_tmpdir.is_dir() + + +# test CLI arguments --keep (-K) and --keep-failed for temp dir fixtures + +FILE_NAME = "hello.txt" + + +@pytest.mark.meta("test_keep") +def test_keep_function_scoped_tmpdir_inner(function_tmpdir): + with open(function_tmpdir / FILE_NAME, "w") as f: + f.write("hello, function-scoped tmpdir") + + +@pytest.mark.meta("test_keep") +class TestKeepClassScopedTmpdirInner: + def test_keep_class_scoped_tmpdir_inner(self, class_tmpdir): + with open(class_tmpdir / FILE_NAME, "w") as f: + f.write("hello, class-scoped tmpdir") + + +@pytest.mark.meta("test_keep") +def test_keep_module_scoped_tmpdir_inner(module_tmpdir): + with open(module_tmpdir / FILE_NAME, "w") as f: + f.write("hello, module-scoped tmpdir") + + +@pytest.mark.meta("test_keep") +def test_keep_session_scoped_tmpdir_inner(session_tmpdir): + with open(session_tmpdir / FILE_NAME, "w") as f: + f.write("hello, session-scoped tmpdir") + + +@pytest.mark.parametrize("arg", ["--keep", "-K"]) +def test_keep_function_scoped_tmpdir(function_tmpdir, arg): + inner_fn = test_keep_function_scoped_tmpdir_inner.__name__ + args = [ + __file__, + "-v", + "-s", + "-k", + 
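+        # -k selects the inner test by name; -M (--meta) and the
+        # parametrized --keep/-K flag are CLI options added by the plugin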
+        inner_fn,
+        "-M",
+        "test_keep",
+        arg,
+        function_tmpdir,
+    ]
+    assert pytest.main(args) == ExitCode.OK
+    assert Path(function_tmpdir / f"{inner_fn}0" / FILE_NAME).is_file()
+
+
+@pytest.mark.parametrize("arg", ["--keep", "-K"])
+def test_keep_class_scoped_tmpdir(tmp_path, arg):
+    args = [
+        __file__,
+        "-v",
+        "-s",
+        "-k",
+        TestKeepClassScopedTmpdirInner.test_keep_class_scoped_tmpdir_inner.__name__,
+        "-M",
+        "test_keep",
+        arg,
+        tmp_path,
+    ]
+    assert pytest.main(args) == ExitCode.OK
+    assert Path(
+        tmp_path / f"{TestKeepClassScopedTmpdirInner.__name__}0" / FILE_NAME
+    ).is_file()
+
+
+@pytest.mark.parametrize("arg", ["--keep", "-K"])
+def test_keep_module_scoped_tmpdir(tmp_path, arg):
+    args = [
+        __file__,
+        "-v",
+        "-s",
+        "-k",
+        test_keep_module_scoped_tmpdir_inner.__name__,
+        "-M",
+        "test_keep",
+        arg,
+        tmp_path,
+    ]
+    assert pytest.main(args) == ExitCode.OK
+    this_path = Path(__file__)
+    keep_path = (
+        tmp_path
+        / f"{str(this_path.parent.parent.name)}.{str(this_path.parent.name)}.{str(this_path.stem)}0"
+    )
+    from pprint import pprint
+
+    print(keep_path)
+    pprint(list(keep_path.glob("*")))
+    assert FILE_NAME in [f.name for f in keep_path.glob("*")]
+
+
+@pytest.mark.parametrize("arg", ["--keep", "-K"])
+def test_keep_session_scoped_tmpdir(tmp_path, arg, request):
+    args = [
+        __file__,
+        "-v",
+        "-s",
+        "-k",
+        test_keep_session_scoped_tmpdir_inner.__name__,
+        "-M",
+        "test_keep",
+        arg,
+        tmp_path,
+    ]
+    assert pytest.main(args) == ExitCode.OK
+    assert Path(tmp_path / f"{request.session.name}0" / FILE_NAME).is_file()
+
+
+@pytest.mark.meta("test_keep_failed")
+def test_keep_failed_function_scoped_tmpdir_inner(function_tmpdir):
+    with open(function_tmpdir / FILE_NAME, "w") as f:
+        f.write("hello, function-scoped tmpdir")
+
+    assert False, "oh no"
+
+
+@pytest.mark.parametrize("keep", [True, False])
+def test_keep_failed_function_scoped_tmpdir(function_tmpdir, keep):
+    inner_fn = test_keep_failed_function_scoped_tmpdir_inner.__name__
+    args = [__file__, "-v", "-s", "-k", inner_fn, "-M", "test_keep_failed"]
+    if keep:
+        args += ["--keep-failed", function_tmpdir]
+    assert pytest.main(args) == ExitCode.TESTS_FAILED
+
+    kept_file = Path(function_tmpdir / f"{inner_fn}0" / FILE_NAME).is_file()
+    assert kept_file if keep else not kept_file
+
+
+# test meta-test marker and CLI argument --meta (-M)
+
+
+@pytest.mark.meta("test_meta")
+def test_meta_inner():
+    pass
+
+
+class TestMeta:
+    def pytest_terminal_summary(self, terminalreporter):
+        stats = terminalreporter.stats
+        assert "failed" not in stats
+
+        passed = [test.head_line for test in stats["passed"]]
+        assert len(passed) == 1
+        assert test_meta_inner.__name__ in passed
+
+        deselected = [fn.name for fn in stats["deselected"]]
+        assert len(deselected) > 0
+
+
+def test_meta():
+    args = [
+        f"{__file__}",
+        "-v",
+        "-s",
+        "-k",
+        test_meta_inner.__name__,
+        "-M",
+        "test_meta",
+    ]
+    assert pytest.main(args, plugins=[TestMeta()]) == ExitCode.OK
+
+
+# test fixtures dynamically generated from examples and test models
+
+
+def test_example_scenario(example_scenario):
+    assert isinstance(example_scenario, tuple)
+    name, namefiles = example_scenario
+    assert isinstance(name, str)
+    assert isinstance(namefiles, list)
+    assert all(namefile.is_file() for namefile in namefiles)
+
+
+def test_test_model_mf6(test_model_mf6):
+    assert isinstance(test_model_mf6, Path)
+    assert (test_model_mf6 / "mfsim.nam").is_file()
+
+
+def test_test_model_mf5to6(test_model_mf5to6):
+    assert isinstance(test_model_mf5to6, Path)
+    assert
len(list(test_model_mf5to6.glob("*.nam"))) >= 1 + + +def test_large_test_model(large_test_model): + assert isinstance(large_test_model, Path) + assert (large_test_model / "mfsim.nam").is_file() diff --git a/modflow_devtools/test/test_framework.py b/modflow_devtools/test/test_framework.py new file mode 100644 index 0000000..e69de29 diff --git a/modflow_devtools/test/test_markers.py b/modflow_devtools/test/test_markers.py new file mode 100644 index 0000000..95f497c --- /dev/null +++ b/modflow_devtools/test/test_markers.py @@ -0,0 +1,49 @@ +from os import environ +from platform import system +from shutil import which + +from modflow_devtools.markers import ( + excludes_platform, + requires_exe, + requires_pkg, + requires_platform, +) + + +@requires_exe("mf6") +def test_requires_exe(): + assert which("mf6") + + +exes = ["mfusg", "mfnwt"] + + +@requires_exe(*exes) +def test_requires_exe_multiple(): + assert all(which(exe) for exe in exes) + + +@requires_pkg("numpy") +def test_requires_pkg(): + import numpy + + assert numpy is not None + + +@requires_pkg("numpy", "matplotlib") +def test_requires_pkg_multiple(): + import matplotlib + import numpy + + assert numpy is not None and matplotlib is not None + + +@requires_platform("Windows") +def test_requires_platform(): + assert system() == "Windows" + + +@excludes_platform("Darwin", ci_only=True) +def test_requires_platform_ci_only(): + if "CI" in environ: + assert system() != "Darwin" diff --git a/modflow_devtools/test/test_misc.py b/modflow_devtools/test/test_misc.py new file mode 100644 index 0000000..a89931e --- /dev/null +++ b/modflow_devtools/test/test_misc.py @@ -0,0 +1,2 @@ +def test_set_dir(): + pass diff --git a/modflow_devtools/test/test_zip.py b/modflow_devtools/test/test_zip.py new file mode 100644 index 0000000..e69de29 diff --git a/modflow_devtools/testing/__init__.py b/modflow_devtools/testing/__init__.py deleted file mode 100644 index 143f486..0000000 --- a/modflow_devtools/testing/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# __init__.py diff --git a/modflow_devtools/testing/budget_testing.py b/modflow_devtools/testing/budget_testing.py deleted file mode 100644 index cd69f2c..0000000 --- a/modflow_devtools/testing/budget_testing.py +++ /dev/null @@ -1,120 +0,0 @@ -# utility for comparing two MODFLOW 6 budget files - -# To use this eval_bud_diff function on a gwf or gwt budget file, -# the function may need ia, in order to exclude comparison of the residual -# term, which is stored in the diagonal position of the flowja array. -# The following code can be used to extract ia from the grb file. 
-# get ia/ja from binary grid file -# fname = '{}.dis.grb'.format(os.path.basename(sim.name)) -# fpth = os.path.join(sim.simpath, fname) -# grbobj = flopy.mf6.utils.MfGrdFile(fpth) -# ia = grbobj._datadict['IA'] - 1 - - -import os - -import numpy as np - - -def eval_bud_diff(fpth, b0, b1, ia=None, dtol=1e-6): - diffmax = 0.0 - difftag = "None" - difftime = None - fail = False - - # build list of cbc data to retrieve - avail = b0.get_unique_record_names() - - # initialize list for storing totals for each budget term terms - cbc_keys = [] - for t in avail: - if isinstance(t, bytes): - t = t.decode() - t = t.strip() - cbc_keys.append(t) - - # open a summary file and write header - f = open(fpth, "w") - line = f"{'Time':15s}" - line += f" {'Datatype':15s}" - line += f" {'File 1':15s}" - line += f" {'File 2':15s}" - line += f" {'Difference':15s}" - f.write(line + "\n") - f.write(len(line) * "-" + "\n") - - # get data from cbc file - kk = b0.get_kstpkper() - times = b0.get_times() - for idx, (k, t) in enumerate(zip(kk, times)): - v0sum = 0.0 - v1sum = 0.0 - for key in cbc_keys: - v0 = b0.get_data(kstpkper=k, text=key)[0] - v1 = b1.get_data(kstpkper=k, text=key)[0] - if isinstance(v0, np.recarray): - v0 = v0["q"].sum() - v1 = v1["q"].sum() - else: - v0 = v0.flatten() - v1 = v1.flatten() - if key == "FLOW-JA-FACE": - # Set residual (stored in diagonal of flowja) to zero - if ia is None: - raise Exception("ia is required for model flowja") - idiagidx = ia[:-1] - v0[idiagidx] = 0.0 - v1[idiagidx] = 0.0 - v0 = v0.sum() - v1 = v1.sum() - - # sum all of the values - if key != "AUXILIARY": - v0sum += v0 - v1sum += v1 - - diff = v0 - v1 - if abs(diff) > abs(diffmax): - diffmax = diff - difftag = key - difftime = t - if abs(diff) > dtol: - fail = True - line = f"{t:15g}" - line += f" {key:15s}" - line += f" {v0:15g}" - line += f" {v1:15g}" - line += f" {diff:15g}" - f.write(line + "\n") - - # evaluate the sums - diff = v0sum - v1sum - if abs(diff) > dtol: - fail = True - line = f"{t:15g}" - line += f" {'TOTAL':15s}" - line += f" {v0sum:15g}" - line += f" {v1sum:15g}" - line += f" {diff:15g}" - f.write(line + "\n") - - msg = f"\nSummary of changes in {os.path.basename(fpth)}\n" - msg += "-" * 72 + "\n" - msg += f"Maximum cbc difference: {diffmax}\n" - msg += f"Maximum cbc difference time: {difftime}\n" - msg += f"Maximum cbc datatype: {difftag}\n" - if fail: - msg += f"Maximum cbc criteria exceeded: {dtol}" - assert not fail, msg - - # close summary file and print the final message - f.close() - print(msg) - - msg = f"sum of first cbc file flows ({v0sum}) " + f"exceeds dtol ({dtol})" - assert abs(v0sum) < dtol, msg - - msg = f"sum of second cbc file flows ({v1sum}) " + f"exceeds dtol ({dtol})" - assert abs(v1sum) < dtol, msg - - return diff --git a/modflow_devtools/testing/testing.py b/modflow_devtools/testing/testing.py deleted file mode 100644 index 9b541e5..0000000 --- a/modflow_devtools/testing/testing.py +++ /dev/null @@ -1,2411 +0,0 @@ -"""A typical example of using the autotest -functionality for MODFLOW-2005 and comparing the MODFLOW-2005 results to -MODFLOW-2000 results is: - -.. 
code-block:: python - - import pymake - - # Setup - testpth = "../test/mytest" - nam1 = "model1.nam" - pymake.setup(nam1, testpth) - - # run test models - success, buff = flopy.run_model( - "mf2005", nam1, model_ws=testpth, silent=True - ) - if success: - testpth_reg = os.path.join(testpth, "mf2000") - nam2 = "model2.name" - pymake.setup(nam2, testpth_reg) - success_reg, buff = flopy.run_model( - "mf2000", nam2, model_ws=testpth_reg, silent=True - ) - - # compare results - if success and success_reg: - fpth = os.path.split(os.path.join(testpth, nam1))[0] - outfile1 = os.path.join(fpth, "bud.cmp") - fpth = os.path.split(os.path.join(testpth, nam2))[0] - outfile2 = os.path.join(fpth, "hds.cmp") - success_reg = pymake.compare( - os.path.join(testpth, nam1), - os.path.join(testpth_reg, nam2), - max_cumpd=0.01, - max_incpd=0.01, - htol=0.001, - outfile1=outfile1, - outfile2=outfile2, - ) - - # Clean things up - if success_reg: - pymake.teardown(testpth) - -Note: autotest functionality will likely be removed from pymake in the future -to a dedicated GitHub repository. - -""" -import os -import shutil -import textwrap - -import numpy as np - -ignore_ext = ( - ".hds", - ".hed", - ".bud", - ".cbb", - ".cbc", - ".ddn", - ".ucn", - ".glo", - ".lst", - ".list", - ".gwv", - ".mv", - ".out", -) - - -def model_setup(namefile, dst, remove_existing=True, extrafiles=None): - """Setup MODFLOW-based model files for autotests. - - Parameters - ---------- - namefile : str - MODFLOW-based model name file. - dst : str - destination path for comparison model or file(s) - remove_existing : bool - boolean indicating if an existing comparision model or file(s) should - be replaced (default is True) - extrafiles : str or list of str - list of extra files to include in the comparision - - Returns - ------- - - """ - # Construct src pth from namefile or lgr file - src = os.path.dirname(namefile) - - # Create the destination folder, if required - create_dir = False - if os.path.exists(dst): - if remove_existing: - print("Removing folder " + dst) - shutil.rmtree(dst) - create_dir = True - else: - create_dir = True - if create_dir: - os.mkdir(dst) - - # determine if a namefile is a lgr control file - get individual - # name files out of the lgr control file - namefiles = [namefile] - ext = os.path.splitext(namefile)[1] - if ".lgr" in ext.lower(): - lines = [line.rstrip("\n") for line in open(namefile)] - for line in lines: - if len(line) < 1: - continue - if line[0] == "#": - continue - t = line.split() - if ".nam" in t[0].lower(): - fpth = os.path.join(src, t[0]) - namefiles.append(fpth) - - # Make list of files to copy - files2copy = [] - for fpth in namefiles: - files2copy.append(os.path.basename(fpth)) - ext = os.path.splitext(fpth)[1] - # copy additional files contained in the name file and - # associated package files - if ext.lower() == ".nam": - fname = os.path.abspath(fpth) - files2copy = files2copy + get_input_files(fname) - - if extrafiles is not None: - if isinstance(extrafiles, str): - extrafiles = [extrafiles] - for fl in extrafiles: - files2copy.append(os.path.basename(fl)) - - # Copy the files - for f in files2copy: - srcf = os.path.join(src, f) - dstf = os.path.join(dst, f) - - # Check to see if dstf is going into a subfolder, and create that - # subfolder if it doesn't exist - sf = os.path.dirname(dstf) - if not os.path.isdir(sf): - os.makedirs(sf) - - # Now copy the file - if os.path.exists(srcf): - print("Copy file '" + srcf + "' -> '" + dstf + "'") - shutil.copy(srcf, dstf) - else: - print(srcf + " 
does not exist") - - return - - -def setup_comparison(namefile, dst, remove_existing=True): - """Setup a comparison model or comparision file(s) for a MODFLOW-based - model. - - Parameters - ---------- - namefile : str - MODFLOW-based model name file. - dst : str - destination path for comparison model or file(s) - remove_existing : bool - boolean indicating if an existing comparision model or file(s) should - be replaced (default is True) - - - Returns - ------- - - """ - # Construct src pth from namefile - src = os.path.dirname(namefile) - action = None - for root, dirs, files in os.walk(src): - dl = [d.lower() for d in dirs] - if any(".cmp" in s for s in dl): - idx = None - for jdx, d in enumerate(dl): - if ".cmp" in d: - idx = jdx - break - if idx is not None: - if "mf2005.cmp" in dl[idx] or "mf2005" in dl[idx]: - action = dirs[idx] - elif "mfnwt.cmp" in dl[idx] or "mfnwt" in dl[idx]: - action = dirs[idx] - elif "mfusg.cmp" in dl[idx] or "mfusg" in dl[idx]: - action = dirs[idx] - elif "mf6.cmp" in dl[idx] or "mf6" in dl[idx]: - action = dirs[idx] - elif "libmf6.cmp" in dl[idx] or "libmf6" in dl[idx]: - action = dirs[idx] - else: - action = dirs[idx] - break - if action is not None: - dst = os.path.join(dst, f"{action}") - if not os.path.isdir(dst): - try: - os.mkdir(dst) - except: - print("Could not make " + dst) - # clean directory - else: - print(f"cleaning...{dst}") - for root, dirs, files in os.walk(dst): - for f in files: - tpth = os.path.join(root, f) - print(f" removing...{tpth}") - os.remove(tpth) - for d in dirs: - tdir = os.path.join(root, d) - print(f" removing...{tdir}") - shutil.rmtree(tdir) - # copy files - cmppth = os.path.join(src, action) - files = os.listdir(cmppth) - files2copy = [] - if action.lower() == ".cmp": - for file in files: - if ".cmp" in os.path.splitext(file)[1].lower(): - files2copy.append(os.path.join(cmppth, file)) - for srcf in files2copy: - f = os.path.basename(srcf) - dstf = os.path.join(dst, f) - # Now copy the file - if os.path.exists(srcf): - print("Copy file '" + srcf + "' -> '" + dstf + "'") - shutil.copy(srcf, dstf) - else: - print(srcf + " does not exist") - else: - for file in files: - if ".nam" in os.path.splitext(file)[1].lower(): - files2copy.append( - os.path.join(cmppth, os.path.basename(file)) - ) - nf = os.path.join(src, action, os.path.basename(file)) - model_setup(nf, dst, remove_existing=remove_existing) - break - - return action - - -def teardown(src): - """Teardown a autotest directory. - - Parameters - ---------- - src : str - autotest directory to teardown - - Returns - ------- - - """ - if os.path.exists(src): - print("Removing folder " + src) - shutil.rmtree(src) - return - - -def get_input_files(namefile): - """Return a list of all the input files in this model. - - Parameters - ---------- - namefile : str - path to a MODFLOW-based model name file - - Returns - ------- - filelist : list - list of MODFLOW-based model input files - - """ - srcdir = os.path.dirname(namefile) - filelist = [] - fname = os.path.join(srcdir, namefile) - with open(fname, "r") as f: - lines = f.readlines() - - for line in lines: - ll = line.strip().split() - if len(ll) < 2: - continue - if line.strip()[0] in ["#", "!"]: - continue - ext = os.path.splitext(ll[2])[1] - if ext.lower() not in ignore_ext: - if len(ll) > 3: - if "replace" in ll[3].lower(): - continue - filelist.append(ll[2]) - - # Now go through every file and look for other files to copy, - # such as 'OPEN/CLOSE'. If found, then add that file to the - # list of files to copy. 
- otherfiles = [] - for fname in filelist: - fname = os.path.join(srcdir, fname) - try: - f = open(fname, "r") - for line in f: - - # Skip invalid lines - ll = line.strip().split() - if len(ll) < 2: - continue - if line.strip()[0] in ["#", "!"]: - continue - - if "OPEN/CLOSE" in line.upper(): - for i, s in enumerate(ll): - if "OPEN/CLOSE" in s.upper(): - stmp = ll[i + 1] - stmp = stmp.replace('"', "") - stmp = stmp.replace("'", "") - otherfiles.append(stmp) - break - except: - print(fname + " does not exist") - - filelist = filelist + otherfiles - - return filelist - - -def get_namefiles(pth, exclude=None): - """Search through a path (pth) for all .nam files. - - Parameters - ---------- - pth : str - path to model files - exclude : str or lst - File or list of files to exclude from the search (default is None) - - Returns - ------- - namefiles : lst - List of namefiles with paths - - """ - namefiles = [] - for root, _, files in os.walk(pth): - namefiles += [ - os.path.join(root, file) for file in files if file.endswith(".nam") - ] - if exclude is not None: - if isinstance(exclude, str): - exclude = [exclude] - exclude = [e.lower() for e in exclude] - pop_list = [] - for namefile in namefiles: - for e in exclude: - if e in namefile.lower(): - pop_list.append(namefile) - for e in pop_list: - namefiles.remove(e) - - return namefiles - - -def get_entries_from_namefile(namefile, ftype=None, unit=None, extension=None): - """Get entries from a namefile. Can select using FTYPE, UNIT, or file - extension. - - Parameters - ---------- - namefile : str - path to a MODFLOW-based model name file - ftype : str - package type - unit : int - file unit number - extension : str - file extension - - Returns - ------- - entries : list of tuples - list of tuples containing FTYPE, UNIT, FNAME, STATUS for each - namefile entry that meets a user-specified value. - - """ - entries = [] - f = open(namefile, "r") - for line in f: - if line.strip() == "": - continue - if line[0] == "#": - continue - ll = line.strip().split() - if len(ll) < 3: - continue - status = "UNKNOWN" - if len(ll) > 3: - status = ll[3].upper() - if ftype is not None: - if ftype.upper() == ll[0].upper(): - filename = os.path.join(os.path.split(namefile)[0], ll[2]) - entries.append((filename, ll[0], ll[1], status)) - elif unit is not None: - if int(unit) == int(ll[1]): - filename = os.path.join(os.path.split(namefile)[0], ll[2]) - entries.append((filename, ll[0], ll[1], status)) - elif extension is not None: - filename = os.path.join(os.path.split(namefile)[0], ll[2]) - ext = os.path.splitext(filename)[1] - if len(ext) > 0: - if ext[0] == ".": - ext = ext[1:] - if extension.lower() == ext.lower(): - entries.append((filename, ll[0], ll[1], status)) - f.close() - if len(entries) < 1: - entries.append((None, None, None, None)) - return entries - - -def get_sim_name(namefiles, rootpth=None): - """Get simulation name. - - Parameters - ---------- - namefiles : str or list of strings - path(s) to MODFLOW-based model name files - rootpth : str - optional root directory path (default is None) - - Returns - ------- - simname : list - list of namefiles without the file extension - - """ - if isinstance(namefiles, str): - namefiles = [namefiles] - sim_name = [] - for namefile in namefiles: - t = namefile.split(os.sep) - if rootpth is None: - idx = -1 - else: - idx = t.index(os.path.split(rootpth)[1]) - - # build dst with everything after the rootpth and before - # the namefile file name. 
- dst = "" - if idx < len(t): - for d in t[idx + 1 : -1]: - dst += f"{d}_" - - # add namefile basename without extension - dst += t[-1].replace(".nam", "") - sim_name.append(dst) - - return sim_name - - -# modflow 6 readers and copiers -def setup_mf6( - src, dst, mfnamefile="mfsim.nam", extrafiles=None, remove_existing=True -): - """Copy all of the MODFLOW 6 input files from the src directory to the dst - directory. - - Parameters - ---------- - src : src - directory path with original MODFLOW 6 input files - dst : str - directory path that original MODFLOW 6 input files will be copied to - mfnamefile : str - optional MODFLOW 6 simulation name file (default is mfsim.nam) - extrafiles : bool - boolean indicating if extra files should be included (default is None) - remove_existing : bool - boolean indicating if existing file in dst should be removed (default - is True) - - Returns - ------- - mf6inp : list - list of MODFLOW 6 input files - mf6outp : list - list of MODFLOW 6 output files - - """ - - # Create the destination folder - create_dir = False - if os.path.exists(dst): - if remove_existing: - print("Removing folder " + dst) - shutil.rmtree(dst) - create_dir = True - else: - create_dir = True - if create_dir: - os.makedirs(dst) - - # Make list of files to copy - fname = os.path.join(src, mfnamefile) - fname = os.path.abspath(fname) - mf6inp, mf6outp = get_mf6_files(fname) - files2copy = [mfnamefile] + mf6inp - - # determine if there are any .ex files - exinp = [] - for f in mf6outp: - ext = os.path.splitext(f)[1] - if ext.lower() == ".hds": - pth = os.path.join(src, f + ".ex") - if os.path.isfile(pth): - exinp.append(f + ".ex") - if len(exinp) > 0: - files2copy += exinp - if extrafiles is not None: - files2copy += extrafiles - - # Copy the files - for f in files2copy: - srcf = os.path.join(src, f) - dstf = os.path.join(dst, f) - - # Check to see if dstf is going into a subfolder, and create that - # subfolder if it doesn't exist - sf = os.path.dirname(dstf) - if not os.path.isdir(sf): - try: - os.mkdir(sf) - except: - print("Could not make " + sf) - - # Now copy the file - if os.path.exists(srcf): - print("Copy file '" + srcf + "' -> '" + dstf + "'") - shutil.copy(srcf, dstf) - else: - print(srcf + " does not exist") - - return mf6inp, mf6outp - - -def get_mf6_comparison(src): - """Determine comparison type for MODFLOW 6 simulation. - - Parameters - ---------- - src : str - directory path to search for comparison types - - Returns - ------- - action : str - comparison type - - """ - action = None - # Possible comparison - the order matters - optcomp = ( - "compare", - ".cmp", - "mf2005", - "mf2005.cmp", - "mfnwt", - "mfnwt.cmp", - "mfusg", - "mfusg.cmp", - "mflgr", - "mflgr.cmp", - "libmf6", - "libmf6.cmp", - "mf6", - "mf6.cmp", - ) - # Construct src pth from namefile - action = None - for _, dirs, _ in os.walk(src): - dl = [d.lower() for d in dirs] - for oc in optcomp: - if any(oc in s for s in dl): - action = oc - break - return action - - -def setup_mf6_comparison(src, dst, remove_existing=True): - """Setup comparision for MODFLOW 6 simulation. - - Parameters - ---------- - src : src - directory path with original MODFLOW 6 input files - dst : str - directory path that original MODFLOW 6 input files will be copied to - remove_existing : bool - boolean indicating if existing file in dst should be removed (default - is True) - - Returns - ------- - action : str - comparison type - - """ - # get the type of comparison to use (compare, mf2005, etc.) 
- action = get_mf6_comparison(src) - - if action is not None: - dst = os.path.join(dst, f"{action}") - if not os.path.isdir(dst): - try: - os.mkdir(dst) - except: - print("Could not make " + dst) - # clean directory - else: - print(f"cleaning...{dst}") - for root, dirs, files in os.walk(dst): - for f in files: - tpth = os.path.join(root, f) - print(f" removing...{tpth}") - os.remove(tpth) - for d in dirs: - tdir = os.path.join(root, d) - print(f" removing...{tdir}") - shutil.rmtree(tdir) - # copy files - cmppth = os.path.join(src, action) - files = os.listdir(cmppth) - files2copy = [] - if action.lower() == "compare" or action.lower() == ".cmp": - for file in files: - if ".cmp" in os.path.splitext(file)[1].lower(): - files2copy.append(os.path.join(cmppth, file)) - for srcf in files2copy: - f = os.path.basename(srcf) - dstf = os.path.join(dst, f) - # Now copy the file - if os.path.exists(srcf): - print("Copy file '" + srcf + "' -> '" + dstf + "'") - shutil.copy(srcf, dstf) - else: - print(srcf + " does not exist") - else: - if "mf6" in action.lower(): - for file in files: - if "mfsim.nam" in file.lower(): - srcf = os.path.join(cmppth, os.path.basename(file)) - files2copy.append(srcf) - srcdir = os.path.join(src, action) - setup_mf6(srcdir, dst, remove_existing=remove_existing) - break - else: - for file in files: - if ".nam" in os.path.splitext(file)[1].lower(): - srcf = os.path.join(cmppth, os.path.basename(file)) - files2copy.append(srcf) - nf = os.path.join(src, action, os.path.basename(file)) - model_setup(nf, dst, remove_existing=remove_existing) - break - - return action - - -def get_mf6_nper(tdisfile): - """Return the number of stress periods in the MODFLOW 6 model. - - Parameters - ---------- - tdisfile : str - path to the TDIS file - - Returns - ------- - nper : int - number of stress periods in the simulation - - """ - with open(tdisfile, "r") as f: - lines = f.readlines() - line = [line for line in lines if "NPER" in line.upper()][0] - nper = line.strip().split()[1] - return nper - - -def get_mf6_mshape(disfile): - """Return the shape of the MODFLOW 6 model. - - Parameters - ---------- - disfile : str - path to a MODFLOW 6 discretization file - - Returns - ------- - mshape : tuple - tuple with the shape of the MODFLOW 6 model. - - """ - with open(disfile, "r") as f: - lines = f.readlines() - - d = {} - for line in lines: - - # Skip over blank and commented lines - ll = line.strip().split() - if len(ll) < 2: - continue - if line.strip()[0] in ["#", "!"]: - continue - - for key in ["NODES", "NCPL", "NLAY", "NROW", "NCOL"]: - if ll[0].upper() in key: - d[key] = int(ll[1]) - - if "NODES" in d: - mshape = (d["NODES"],) - elif "NCPL" in d: - mshape = (d["NLAY"], d["NCPL"]) - elif "NLAY" in d: - mshape = (d["NLAY"], d["NROW"], d["NCOL"]) - else: - print(d) - raise Exception("Could not determine model shape") - return mshape - - -def get_mf6_files(mfnamefile): - """Return a list of all the MODFLOW 6 input and output files in this model. 
- - Parameters - ---------- - mfnamefile : str - path to the MODFLOW 6 simulation name file - - Returns - ------- - filelist : list - list of MODFLOW 6 input files in a simulation - outplist : list - list of MODFLOW 6 output files in a simulation - - """ - - srcdir = os.path.dirname(mfnamefile) - filelist = [] - outplist = [] - - filekeys = ["TDIS6", "GWF6", "GWT", "GWF6-GWF6", "GWF-GWT", "IMS6"] - namefilekeys = ["GWF6", "GWT"] - namefiles = [] - - with open(mfnamefile) as f: - - # Read line and skip comments - lines = f.readlines() - - for line in lines: - - # Skip over blank and commented lines - ll = line.strip().split() - if len(ll) < 2: - continue - if line.strip()[0] in ["#", "!"]: - continue - - for key in filekeys: - if key in ll[0].upper(): - fname = ll[1] - filelist.append(fname) - - for key in namefilekeys: - if key in ll[0].upper(): - fname = ll[1] - namefiles.append(fname) - - # Go through name files and get files - for namefile in namefiles: - fname = os.path.join(srcdir, namefile) - with open(fname, "r") as f: - lines = f.readlines() - insideblock = False - - for line in lines: - ll = line.upper().strip().split() - if len(ll) < 2: - continue - if ll[0] in "BEGIN" and ll[1] in "PACKAGES": - insideblock = True - continue - if ll[0] in "END" and ll[1] in "PACKAGES": - insideblock = False - - if insideblock: - ll = line.strip().split() - if len(ll) < 2: - continue - if line.strip()[0] in ["#", "!"]: - continue - filelist.append(ll[1]) - - # Recursively go through every file and look for other files to copy, - # such as 'OPEN/CLOSE' and 'TIMESERIESFILE'. If found, then - # add that file to the list of files to copy. - flist = filelist - # olist = outplist - while True: - olist = [] - flist, olist = _get_mf6_external_files(srcdir, olist, flist) - # add to filelist - if len(flist) > 0: - filelist = filelist + flist - # add to outplist - if len(olist) > 0: - outplist = outplist + olist - # terminate loop if no additional files - # if len(flist) < 1 and len(olist) < 1: - if len(flist) < 1: - break - - return filelist, outplist - - -def _get_mf6_external_files(srcdir, outplist, files): - """Get list of external files in a MODFLOW 6 simulation. 
- - Parameters - ---------- - srcdir : str - path to a directory containing a MODFLOW 6 simulation - outplist : list - list of output files in a MODFLOW 6 simulation - files : list - list of MODFLOW 6 name files - - Returns - ------- - - """ - extfiles = [] - - for fname in files: - fname = os.path.join(srcdir, fname) - try: - f = open(fname, "r") - for line in f: - - # Skip invalid lines - ll = line.strip().split() - if len(ll) < 2: - continue - if line.strip()[0] in ["#", "!"]: - continue - - if "OPEN/CLOSE" in line.upper(): - for i, s in enumerate(ll): - if s.upper() == "OPEN/CLOSE": - stmp = ll[i + 1] - stmp = stmp.replace('"', "") - stmp = stmp.replace("'", "") - extfiles.append(stmp) - break - - if "TS6" in line.upper(): - for i, s in enumerate(ll): - if s.upper() == "FILEIN": - stmp = ll[i + 1] - stmp = stmp.replace('"', "") - stmp = stmp.replace("'", "") - extfiles.append(stmp) - break - - if "TAS6" in line.upper(): - for i, s in enumerate(ll): - if s.upper() == "FILEIN": - stmp = ll[i + 1] - stmp = stmp.replace('"', "") - stmp = stmp.replace("'", "") - extfiles.append(stmp) - break - - if "OBS6" in line.upper(): - for i, s in enumerate(ll): - if s.upper() == "FILEIN": - stmp = ll[i + 1] - stmp = stmp.replace('"', "") - stmp = stmp.replace("'", "") - extfiles.append(stmp) - break - - if "EXTERNAL" in line.upper(): - for i, s in enumerate(ll): - if s.upper() == "EXTERNAL": - stmp = ll[i + 1] - stmp = stmp.replace('"', "") - stmp = stmp.replace("'", "") - extfiles.append(stmp) - break - - if "FILE" in line.upper(): - for i, s in enumerate(ll): - if s.upper() == "FILEIN": - stmp = ll[i + 1] - stmp = stmp.replace('"', "") - stmp = stmp.replace("'", "") - extfiles.append(stmp) - break - - if "FILE" in line.upper(): - for i, s in enumerate(ll): - if s.upper() == "FILEOUT": - stmp = ll[i + 1] - stmp = stmp.replace('"', "") - stmp = stmp.replace("'", "") - outplist.append(stmp) - break - - except: - print("could not get a list of external mf6 files") - - return extfiles, outplist - - -def get_mf6_ftypes(namefile, ftypekeys): - """Return a list of FTYPES that are in the name file and in ftypekeys. - - Parameters - ---------- - namefile : str - path to a MODFLOW 6 name file - ftypekeys : list - list of desired FTYPEs - - Returns - ------- - ftypes : list - list of FTYPES that match ftypekeys in namefile - - """ - with open(namefile, "r") as f: - lines = f.readlines() - - ftypes = [] - for line in lines: - - # Skip over blank and commented lines - ll = line.strip().split() - if len(ll) < 2: - continue - if line.strip()[0] in ["#", "!"]: - continue - - for key in ftypekeys: - if ll[0].upper() in key: - ftypes.append(ll[0]) - - return ftypes - - -def get_mf6_blockdata(f, blockstr): - """Return list with all non comments between start and end of block - specified by blockstr. 
- - Parameters - ---------- - f : file object - open file object - blockstr : str - name of block to search - - Returns - ------- - data : list - list of data in specified block - - """ - data = [] - - # find beginning of block - for line in f: - if line[0] != "#": - t = line.split() - if t[0].lower() == "begin" and t[1].lower() == blockstr.lower(): - break - for line in f: - if line[0] != "#": - t = line.split() - if t[0].lower() == "end" and t[1].lower() == blockstr.lower(): - break - else: - data.append(line.rstrip()) - return data - - -# compare functions -def compare_budget( - namefile1, - namefile2, - max_cumpd=0.01, - max_incpd=0.01, - outfile=None, - files1=None, - files2=None, -): - """Compare the budget results from two simulations. - - Parameters - ---------- - namefile1 : str - namefile path for base model - namefile2 : str - namefile path for comparison model - max_cumpd : float - maximum percent discrepancy allowed for cumulative budget terms - (default is 0.01) - max_incpd : float - maximum percent discrepancy allowed for incremental budget terms - (default is 0.01) - outfile : str - budget comparison output file name. If outfile is None, no - comparison output is saved. (default is None) - files1 : str - base model output file. If files1 is not None, results - will be extracted from files1 and namefile1 will not be used. - (default is None) - files2 : str - comparison model output file. If files2 is not None, results - will be extracted from files2 and namefile2 will not be used. - (default is None) - - Returns - ------- - success : bool - boolean indicating if the difference between budgets are less - than max_cumpd and max_incpd - - """ - try: - import flopy - except: - msg = "flopy not available - cannot use compare_budget" - raise ValueError(msg) - - # headers - headers = ("INCREMENTAL", "CUMULATIVE") - direction = ("IN", "OUT") - - # Get name of list files - lst_file1 = None - if files1 is None: - lst_file = get_entries_from_namefile(namefile1, "list") - lst_file1 = lst_file[0][0] - else: - if isinstance(files1, str): - files1 = [files1] - for file in files1: - if ( - "list" in os.path.basename(file).lower() - or "lst" in os.path.basename(file).lower() - ): - lst_file1 = file - break - lst_file2 = None - if files2 is None: - lst_file = get_entries_from_namefile(namefile2, "list") - lst_file2 = lst_file[0][0] - else: - if isinstance(files2, str): - files2 = [files2] - for file in files2: - if ( - "list" in os.path.basename(file).lower() - or "lst" in os.path.basename(file).lower() - ): - lst_file2 = file - break - # Determine if there are two files to compare - if lst_file1 is None or lst_file2 is None: - print("lst_file1 or lst_file2 is None") - print(f"lst_file1: {lst_file1}") - print(f"lst_file2: {lst_file2}") - return True - - # Open output file - if outfile is not None: - f = open(outfile, "w") - f.write("Created by pymake.autotest.compare\n") - - # Initialize SWR budget objects - lst1obj = flopy.utils.MfusgListBudget(lst_file1) - lst2obj = flopy.utils.MfusgListBudget(lst_file2) - - # Determine if there any SWR entries in the budget file - if not lst1obj.isvalid() or not lst2obj.isvalid(): - return True - - # Get numpy budget tables for lst_file1 - lst1 = [] - lst1.append(lst1obj.get_incremental()) - lst1.append(lst1obj.get_cumulative()) - - # Get numpy budget tables for lst_file2 - lst2 = [] - lst2.append(lst2obj.get_incremental()) - lst2.append(lst2obj.get_cumulative()) - - icnt = 0 - v0 = np.zeros(2, dtype=float) - v1 = np.zeros(2, dtype=float) - err = 
np.zeros(2, dtype=float) - - # Process cumulative and incremental - for idx in range(2): - if idx > 0: - max_pd = max_cumpd - else: - max_pd = max_incpd - kper = lst1[idx]["stress_period"] - kstp = lst1[idx]["time_step"] - - # Process each time step - for jdx in range(kper.shape[0]): - - err[:] = 0.0 - t0 = lst1[idx][jdx] - t1 = lst2[idx][jdx] - - if outfile is not None: - - maxcolname = 0 - for colname in t0.dtype.names: - maxcolname = max(maxcolname, len(colname)) - - s = 2 * "\n" - s += ( - f"STRESS PERIOD: {kper[jdx] + 1} " - + f"TIME STEP: {kstp[jdx] + 1}" - ) - f.write(s) - - if idx == 0: - f.write("\nINCREMENTAL BUDGET\n") - else: - f.write("\nCUMULATIVE BUDGET\n") - - for i, colname in enumerate(t0.dtype.names): - if i == 0: - s = ( - f"{'Budget Entry':<21} {'Model 1':>21} " - + f"{'Model 2':>21} {'Difference':>21}\n" - ) - f.write(s) - s = 87 * "-" + "\n" - f.write(s) - diff = t0[colname] - t1[colname] - s = ( - f"{colname:<21} {t0[colname]:>21} " - + f"{t1[colname]:>21} {diff:>21}\n" - ) - f.write(s) - - v0[0] = t0["TOTAL_IN"] - v1[0] = t1["TOTAL_IN"] - if v0[0] > 0.0: - err[0] = 100.0 * (v1[0] - v0[0]) / v0[0] - v0[1] = t0["TOTAL_OUT"] - v1[1] = t1["TOTAL_OUT"] - if v0[1] > 0.0: - err[1] = 100.0 * (v1[1] - v0[1]) / v0[1] - for kdx, t in enumerate(err): - if abs(t) > max_pd: - icnt += 1 - if outfile is not None: - e = ( - f'"{headers[idx]} {direction[kdx]}" ' - + f"percent difference ({t})" - + f" for stress period {kper[jdx] + 1} " - + f"and time step {kstp[jdx] + 1} > {max_pd}." - + f" Reference value = {v0[kdx]}. " - + f"Simulated value = {v1[kdx]}." - ) - e = textwrap.fill( - e, - width=70, - initial_indent=" ", - subsequent_indent=" ", - ) - f.write(f"{e}\n") - f.write("\n") - - # Close output file - if outfile is not None: - f.close() - - # test for failure - success = True - if icnt > 0: - success = False - return success - - -def compare_swrbudget( - namefile1, - namefile2, - max_cumpd=0.01, - max_incpd=0.01, - outfile=None, - files1=None, - files2=None, -): - """Compare the SWR budget results from two simulations. - - Parameters - ---------- - namefile1 : str - namefile path for base model - namefile2 : str - namefile path for comparison model - max_cumpd : float - maximum percent discrepancy allowed for cumulative budget terms - (default is 0.01) - max_incpd : float - maximum percent discrepancy allowed for incremental budget terms - (default is 0.01) - outfile : str - budget comparison output file name. If outfile is None, no - comparison output is saved. (default is None) - files1 : str - base model output file. If files1 is not None, results - will be extracted from files1 and namefile1 will not be used. - (default is None) - files2 : str - comparison model output file. If files2 is not None, results - will be extracted from files2 and namefile2 will not be used. 
- (default is None) - - Returns - ------- - success : bool - boolean indicating if the difference between budgets are less - than max_cumpd and max_incpd - - """ - try: - import flopy - except: - msg = "flopy not available - cannot use compare_swrbudget" - raise ValueError(msg) - - # headers - headers = ("INCREMENTAL", "CUMULATIVE") - direction = ("IN", "OUT") - - # Get name of list files - list1 = None - if files1 is None: - lst = get_entries_from_namefile(namefile1, "list") - list1 = lst[0][0] - else: - for file in files1: - if ( - "list" in os.path.basename(file).lower() - or "lst" in os.path.basename(file).lower() - ): - list1 = file - break - list2 = None - if files2 is None: - lst = get_entries_from_namefile(namefile2, "list") - list2 = lst[0][0] - else: - for file in files2: - if ( - "list" in os.path.basename(file).lower() - or "lst" in os.path.basename(file).lower() - ): - list2 = file - break - # Determine if there are two files to compare - if list1 is None or list2 is None: - return True - - # Initialize SWR budget objects - lst1obj = flopy.utils.SwrListBudget(list1) - lst2obj = flopy.utils.SwrListBudget(list2) - - # Determine if there any SWR entries in the budget file - if not lst1obj.isvalid() or not lst2obj.isvalid(): - return True - - # Get numpy budget tables for list1 - lst1 = [] - lst1.append(lst1obj.get_incremental()) - lst1.append(lst1obj.get_cumulative()) - - # Get numpy budget tables for list2 - lst2 = [] - lst2.append(lst2obj.get_incremental()) - lst2.append(lst2obj.get_cumulative()) - - icnt = 0 - v0 = np.zeros(2, dtype=float) - v1 = np.zeros(2, dtype=float) - err = np.zeros(2, dtype=float) - - # Open output file - if outfile is not None: - f = open(outfile, "w") - f.write("Created by pymake.autotest.compare\n") - - # Process cumulative and incremental - for idx in range(2): - if idx > 0: - max_pd = max_cumpd - else: - max_pd = max_incpd - kper = lst1[idx]["stress_period"] - kstp = lst1[idx]["time_step"] - - # Process each time step - for jdx in range(kper.shape[0]): - - err[:] = 0.0 - t0 = lst1[idx][jdx] - t1 = lst2[idx][jdx] - - if outfile is not None: - - maxcolname = 0 - for colname in t0.dtype.names: - maxcolname = max(maxcolname, len(colname)) - - s = 2 * "\n" - s += ( - f"STRESS PERIOD: {kper[jdx] + 1} " - + f"TIME STEP: {kstp[jdx] + 1}" - ) - f.write(s) - - if idx == 0: - f.write("\nINCREMENTAL BUDGET\n") - else: - f.write("\nCUMULATIVE BUDGET\n") - - for i, colname in enumerate(t0.dtype.names): - if i == 0: - s = ( - f"{'Budget Entry':<21} {'Model 1':>21} " - + f"{'Model 2':>21} {'Difference':>21}\n" - ) - f.write(s) - s = 87 * "-" + "\n" - f.write(s) - diff = t0[colname] - t1[colname] - s = ( - f"{colname:<21} {t0[colname]:>21} " - + f"{t1[colname]:>21} {diff:>21}\n" - ) - f.write(s) - - v0[0] = t0["TOTAL_IN"] - v1[0] = t1["TOTAL_IN"] - if v0[0] > 0.0: - err[0] = 100.0 * (v1[0] - v0[0]) / v0[0] - v0[1] = t0["TOTAL_OUT"] - v1[1] = t1["TOTAL_OUT"] - if v0[1] > 0.0: - err[1] = 100.0 * (v1[1] - v0[1]) / v0[1] - for kdx, t in enumerate(err): - if abs(t) > max_pd: - icnt += 1 - e = ( - f'"{headers[idx]} {direction[kdx]}" ' - + f"percent difference ({t})" - + f" for stress period {kper[jdx] + 1} " - + f"and time step {kstp[jdx] + 1} > {max_pd}." - + f" Reference value = {v0[kdx]}. " - + f"Simulated value = {v1[kdx]}." 
- ) - e = textwrap.fill( - e, - width=70, - initial_indent=" ", - subsequent_indent=" ", - ) - f.write(f"{e}\n") - f.write("\n") - - # Close output file - if outfile is not None: - f.close() - - # test for failure - success = True - if icnt > 0: - success = False - return success - - -def compare_heads( - namefile1, - namefile2, - precision="auto", - text="head", - text2=None, - htol=0.001, - outfile=None, - files1=None, - files2=None, - difftol=False, - verbose=False, - exfile=None, - exarr=None, - maxerr=None, -): - """Compare the head results from two simulations. - - Parameters - ---------- - namefile1 : str - namefile path for base model - namefile2 : str - namefile path for comparison model - precision : str - precision for binary head file ("auto", "single", or "double") - default is "auto" - htol : float - maximum allowed head difference (default is 0.001) - outfile : str - head comparison output file name. If outfile is None, no - comparison output is saved. (default is None) - files1 : str - base model output file. If files1 is not None, results - will be extracted from files1 and namefile1 will not be used. - (default is None) - files2 : str - comparison model output file. If files2 is not None, results - will be extracted from files2 and namefile2 will not be used. - (default is None) - difftol : bool - boolean determining if the absolute value of the head - difference greater than htol should be evaluated (default is False) - verbose : bool - boolean indicating if verbose output should be written to the - terminal (default is False) - exfile : str - path to a file with exclusion array data. Head differences will not - be evaluated where exclusion array values are greater than zero. - (default is None) - exarr : numpy.ndarry - exclusion array. Head differences will not be evaluated where - exclusion array values are greater than zero. (default is None). - maxerr : int - maximum number of head difference greater than htol that should be - reported. If maxerr is None, all head difference greater than htol - will be reported. (default is None) - - Returns - ------- - success : bool - boolean indicating if the head differences are less than htol. 
- - """ - try: - import flopy - except: - msg = "flopy not available - cannot use compare_heads" - raise ValueError(msg) - - if text2 is None: - text2 = text - - dbs = "DATA(BINARY)" - - # Get head info for namefile1 - hfpth1 = None - status1 = dbs - if files1 is None: - # Get oc info, and return if OC not included in models - ocf1 = get_entries_from_namefile(namefile1, "OC") - if ocf1[0][0] is None: - return True - - hu1, hfpth1, du1, _ = flopy.modflow.ModflowOc.get_ocoutput_units( - ocf1[0][0] - ) - if text.lower() == "head": - iut = hu1 - elif text.lower() == "drawdown": - iut = du1 - if iut != 0: - entries = get_entries_from_namefile(namefile1, unit=abs(iut)) - hfpth1, status1 = entries[0][0], entries[0][1] - - else: - if isinstance(files1, str): - files1 = [files1] - for file in files1: - if text.lower() == "head": - if ( - "hds" in os.path.basename(file).lower() - or "hed" in os.path.basename(file).lower() - ): - hfpth1 = file - break - elif text.lower() == "drawdown": - if "ddn" in os.path.basename(file).lower(): - hfpth1 = file - break - elif text.lower() == "concentration": - if "ucn" in os.path.basename(file).lower(): - hfpth1 = file - break - else: - hfpth1 = file - break - - # Get head info for namefile2 - hfpth2 = None - status2 = dbs - if files2 is None: - # Get oc info, and return if OC not included in models - ocf2 = get_entries_from_namefile(namefile2, "OC") - if ocf2[0][0] is None: - return True - - hu2, hfpth2, du2, dfpth2 = flopy.modflow.ModflowOc.get_ocoutput_units( - ocf2[0][0] - ) - if text.lower() == "head": - iut = hu2 - elif text.lower() == "drawdown": - iut = du2 - if iut != 0: - entries = get_entries_from_namefile(namefile2, unit=abs(iut)) - hfpth2, status2 = entries[0][0], entries[0][1] - else: - if isinstance(files2, str): - files2 = [files2] - for file in files2: - if text2.lower() == "head": - if ( - "hds" in os.path.basename(file).lower() - or "hed" in os.path.basename(file).lower() - ): - hfpth2 = file - break - elif text2.lower() == "drawdown": - if "ddn" in os.path.basename(file).lower(): - hfpth2 = file - break - elif text2.lower() == "concentration": - if "ucn" in os.path.basename(file).lower(): - hfpth2 = file - break - else: - hfpth2 = file - break - - # confirm that there are two files to compare - if hfpth1 is None or hfpth2 is None: - print("hfpth1 or hfpth2 is None") - print(f"hfpth1: {hfpth1}") - print(f"hfpth2: {hfpth2}") - return True - - # make sure the file paths exist - if not os.path.isfile(hfpth1) or not os.path.isfile(hfpth2): - print("hfpth1 or hfpth2 is not a file") - print(f"hfpth1 isfile: {os.path.isfile(hfpth1)}") - print(f"hfpth2 isfile: {os.path.isfile(hfpth2)}") - return False - - # Open output file - if outfile is not None: - f = open(outfile, "w") - f.write("Created by pymake.autotest.compare\n") - f.write(f"Performing {text.upper()} to {text2.upper()} comparison\n") - - if exfile is not None: - f.write(f"Using exclusion file {exfile}\n") - if exarr is not None: - f.write("Using exclusion array\n") - - msg = f"{hfpth1} is a " - if status1 == dbs: - msg += "binary file." - else: - msg += "ascii file." - f.write(msg + "\n") - msg = f"{hfpth2} is a " - if status2 == dbs: - msg += "binary file." - else: - msg += "ascii file." 
- f.write(msg + "\n") - - # Process exclusion data - exd = None - # get data from exclusion file - if exfile is not None: - e = None - if isinstance(exfile, str): - try: - exd = np.genfromtxt(exfile).flatten() - except: - e = ( - "Could not read exclusion " - + f"file {os.path.basename(exfile)}" - ) - print(e) - return False - else: - e = "exfile is not a valid file path" - print(e) - return False - - # process exclusion array - if exarr is not None: - e = None - if isinstance(exarr, np.ndarray): - if exd is None: - exd = exarr.flatten() - else: - exd += exarr.flatten() - else: - e = "exarr is not a numpy array" - print(e) - return False - - # Get head objects - status1 = status1.upper() - unstructured1 = False - if status1 == dbs: - headobj1 = flopy.utils.HeadFile( - hfpth1, precision=precision, verbose=verbose, text=text - ) - txt = headobj1.recordarray["text"][0] - if isinstance(txt, bytes): - txt = txt.decode("utf-8") - if "HEADU" in txt: - unstructured1 = True - headobj1 = flopy.utils.HeadUFile( - hfpth1, precision=precision, verbose=verbose - ) - else: - headobj1 = flopy.utils.FormattedHeadFile( - hfpth1, verbose=verbose, text=text - ) - - status2 = status2.upper() - unstructured2 = False - if status2 == dbs: - headobj2 = flopy.utils.HeadFile( - hfpth2, precision=precision, verbose=verbose, text=text2 - ) - txt = headobj2.recordarray["text"][0] - if isinstance(txt, bytes): - txt = txt.decode("utf-8") - if "HEADU" in txt: - unstructured2 = True - headobj2 = flopy.utils.HeadUFile( - hfpth2, precision=precision, verbose=verbose - ) - else: - headobj2 = flopy.utils.FormattedHeadFile( - hfpth2, verbose=verbose, text=text2 - ) - - # get times - times1 = headobj1.get_times() - times2 = headobj2.get_times() - for (t1, t2) in zip(times1, times2): - if not np.allclose([t1], [t2]): - msg = "times in two head files are not " + f"equal ({t1},{t2})" - raise ValueError(msg) - - kstpkper = headobj1.get_kstpkper() - - line_separator = 15 * "-" - header = ( - f"{' ':>15s} {' ':>15s} {'MAXIMUM':>15s} {'EXCEEDS':>15s}\n" - + f"{'STRESS PERIOD':>15s} {'TIME STEP':>15s} " - + f"{'HEAD DIFFERENCE':>15s} {'CRITERIA':>15s}\n" - + f"{line_separator:>15s} {line_separator:>15s} " - + f"{line_separator:>15s} {line_separator:>15s}\n" - ) - - if verbose: - print(f"Comparing results for {len(times1)} times") - - icnt = 0 - # Process cumulative and incremental - for idx, (t1, t2) in enumerate(zip(times1, times2)): - h1 = headobj1.get_data(totim=t1) - if unstructured1: - temp = np.array([]) - for a in h1: - temp = np.hstack((temp, a)) - h1 = temp - h2 = headobj2.get_data(totim=t2) - if unstructured2: - temp = np.array([]) - for a in h2: - temp = np.hstack((temp, a)) - h2 = temp - - if exd is not None: - # reshape exd to the shape of the head arrays - if idx == 0: - e = ( - f"shape of exclusion data ({exd.shape})" - + "can not be reshaped to the size of the " - + f"head arrays ({h1.shape})" - ) - if h1.flatten().shape != exd.shape: - raise ValueError(e) - exd = exd.reshape(h1.shape) - iexd = exd > 0 - - # reset h1 and h2 to the same value in the excluded area - h1[iexd] = 0.0 - h2[iexd] = 0.0 - - if difftol: - diffmax, indices = _calculate_difftol(h1, h2, htol) - else: - diffmax, indices = _calculate_diffmax(h1, h2) - - if outfile is not None: - if idx < 1: - f.write(header) - if diffmax > htol: - sexceed = "*" - else: - sexceed = "" - kk1 = kstpkper[idx][1] + 1 - kk0 = kstpkper[idx][0] + 1 - f.write(f"{kk1:15d} {kk0:15d} {diffmax:15.6g} {sexceed:15s}\n") - - if diffmax >= htol: - icnt += 1 - if outfile is not None: - 
if difftol: - ee = ( - "Maximum absolute head difference " - + f"({diffmax}) -- " - + f"{htol} tolerance exceeded at " - + f"{indices[0].shape[0]} node location(s)" - ) - else: - ee = ( - "Maximum absolute head difference " - + f"({diffmax}) exceeded " - + f"at {indices[0].shape[0]} node location(s)" - ) - e = textwrap.fill( - ee + ":", - width=70, - initial_indent=" ", - subsequent_indent=" ", - ) - - if verbose: - f.write(f"{ee}\n") - print(ee + f" at time {t1}") - - e = "" - ncells = h1.flatten().shape[0] - fmtn = "{:" + f"{len(str(ncells))}" + "d}" - for itupe in indices: - for jdx, ind in enumerate(itupe): - iv = np.unravel_index(ind, h1.shape) - iv = tuple(i + 1 for i in iv) - v1 = h1.flatten()[ind] - v2 = h2.flatten()[ind] - d12 = v1 - v2 - # e += ' ' + fmtn.format(jdx + 1) + ' node: ' - # e += fmtn.format(ind + 1) # convert to one-based - e += " " + fmtn.format(jdx + 1) - e += f" {iv}" - e += " -- " - e += f"h1: {v1:20} " - e += f"h2: {v2:20} " - e += f"diff: {d12:20}\n" - if isinstance(maxerr, int): - if jdx + 1 >= maxerr: - break - if verbose: - f.write(f"{e}\n") - # Write header again, unless it is the last record - if verbose: - if idx + 1 < len(times1): - f.write(f"\n{header}") - - # Close output file - if outfile is not None: - f.close() - - # test for failure - success = True - if icnt > 0: - success = False - return success - - -def compare_concs( - namefile1, - namefile2, - precision="auto", - ctol=0.001, - outfile=None, - files1=None, - files2=None, - difftol=False, - verbose=False, -): - """Compare the mt3dms and mt3dusgs concentration results from two - simulations. - - Parameters - ---------- - namefile1 : str - namefile path for base model - namefile2 : str - namefile path for comparison model - precision : str - precision for binary head file ("auto", "single", or "double") - default is "auto" - ctol : float - maximum allowed concentration difference (default is 0.001) - outfile : str - concentration comparison output file name. If outfile is None, no - comparison output is saved. (default is None) - files1 : str - base model output file. If files1 is not None, results - will be extracted from files1 and namefile1 will not be used. - (default is None) - files2 : str - comparison model output file. If files2 is not None, results - will be extracted from files2 and namefile2 will not be used. - (default is None) - difftol : bool - boolean determining if the absolute value of the concentration - difference greater than ctol should be evaluated (default is False) - verbose : bool - boolean indicating if verbose output should be written to the - terminal (default is False) - - Returns - ------- - success : bool - boolean indicating if the concentration differences are less than - ctol. 
- - Returns - ------- - - """ - try: - import flopy - except: - msg = "flopy not available - cannot use compare_concs" - raise ValueError(msg) - - # list of valid extensions - valid_ext = ["ucn"] - - # Get info for first ucn file - ufpth1 = None - if files1 is None: - for ext in valid_ext: - ucn = get_entries_from_namefile(namefile1, extension=ext) - ufpth = ucn[0][0] - if ufpth is not None: - ufpth1 = ufpth - break - if ufpth1 is None: - ufpth1 = os.path.join(os.path.dirname(namefile1), "MT3D001.UCN") - else: - if isinstance(files1, str): - files1 = [files1] - for file in files1: - for ext in valid_ext: - if ext in os.path.basename(file).lower(): - ufpth1 = file - break - - # Get info for second ucn file - ufpth2 = None - if files2 is None: - for ext in valid_ext: - ucn = get_entries_from_namefile(namefile2, extension=ext) - ufpth = ucn[0][0] - if ufpth is not None: - ufpth2 = ufpth - break - if ufpth2 is None: - ufpth2 = os.path.join(os.path.dirname(namefile2), "MT3D001.UCN") - else: - if isinstance(files2, str): - files2 = [files2] - for file in files2: - for ext in valid_ext: - if ext in os.path.basename(file).lower(): - ufpth2 = file - break - - # confirm that there are two files to compare - if ufpth1 is None or ufpth2 is None: - if ufpth1 is None: - print(" UCN file 1 not set") - if ufpth2 is None: - print(" UCN file 2 not set") - return True - - if not os.path.isfile(ufpth1) or not os.path.isfile(ufpth2): - if not os.path.isfile(ufpth1): - print(f" {ufpth1} does not exist") - if not os.path.isfile(ufpth2): - print(f" {ufpth2} does not exist") - return True - - # Open output file - if outfile is not None: - f = open(outfile, "w") - f.write("Created by pymake.autotest.compare_concs\n") - - # Get stage objects - uobj1 = flopy.utils.UcnFile(ufpth1, precision=precision, verbose=verbose) - uobj2 = flopy.utils.UcnFile(ufpth2, precision=precision, verbose=verbose) - - # get times - times1 = uobj1.get_times() - times2 = uobj2.get_times() - nt1 = len(times1) - nt2 = len(times2) - nt = min(nt1, nt2) - - for (t1, t2) in zip(times1, times2): - if not np.allclose([t1], [t2]): - msg = f"times in two ucn files are not equal ({t1},{t2})" - raise ValueError(msg) - - if nt == nt1: - kstpkper = uobj1.get_kstpkper() - else: - kstpkper = uobj2.get_kstpkper() - - line_separator = 15 * "-" - header = ( - f"{' ':>15s} {' ':>15s} {'MAXIMUM':>15s}\n" - + f"{'STRESS PERIOD':>15s} {'TIME STEP':>15s} " - + f"{'CONC DIFFERENCE':>15s}\n" - + f"{line_separator:>15s} " - + f"{line_separator:>15s} " - + f"{line_separator:>15s}\n" - ) - - if verbose: - print(f"Comparing results for {len(times1)} times") - - icnt = 0 - # Process cumulative and incremental - for idx, time in enumerate(times1[0:nt]): - try: - u1 = uobj1.get_data(totim=time) - u2 = uobj2.get_data(totim=time) - - if difftol: - diffmax, indices = _calculate_difftol(u1, u2, ctol) - else: - diffmax, indices = _calculate_diffmax(u1, u2) - - if outfile is not None: - if idx < 1: - f.write(header) - f.write( - f"{kstpkper[idx][1] + 1:15d} " - + f"{kstpkper[idx][0] + 1:15d} " - + f"{diffmax:15.6g}\n" - ) - - if diffmax >= ctol: - icnt += 1 - if outfile is not None: - if difftol: - ee = ( - f"Maximum concentration difference ({diffmax})" - + f" -- {ctol} tolerance exceeded at " - + f"{indices[0].shape[0]} node location(s)" - ) - else: - ee = ( - "Maximum concentration difference " - + f"({diffmax}) exceeded " - + f"at {indices[0].shape[0]} node location(s)" - ) - e = textwrap.fill( - ee + ":", - width=70, - initial_indent=" ", - subsequent_indent=" ", - ) - 
f.write(f"{e}\n") - if verbose: - print(ee + f" at time {time}") - e = "" - for itupe in indices: - for ind in itupe: - e += f"{ind + 1} " # convert to one-based - e = textwrap.fill( - e, - width=70, - initial_indent=" ", - subsequent_indent=" ", - ) - f.write(f"{e}\n") - # Write header again, unless it is the last record - if idx + 1 < len(times1): - f.write(f"\n{header}") - except: - print(f" could not process time={time}") - print(" terminating ucn processing...") - break - - # Close output file - if outfile is not None: - f.close() - - # test for failure - success = True - if icnt > 0: - success = False - return success - - -def compare_stages( - namefile1=None, - namefile2=None, - files1=None, - files2=None, - htol=0.001, - outfile=None, - difftol=False, - verbose=False, -): - """Compare SWR process stage results from two simulations. - - Parameters - ---------- - namefile1 : str - namefile path for base model - namefile2 : str - namefile path for comparison model - precision : str - precision for binary head file ("auto", "single", or "double") - default is "auto" - htol : float - maximum allowed stage difference (default is 0.001) - outfile : str - head comparison output file name. If outfile is None, no - comparison output is saved. (default is None) - files1 : str - base model output file. If files1 is not None, results - will be extracted from files1 and namefile1 will not be used. - (default is None) - files2 : str - comparison model output file. If files2 is not None, results - will be extracted from files2 and namefile2 will not be used. - (default is None) - difftol : bool - boolean determining if the absolute value of the stage - difference greater than htol should be evaluated (default is False) - verbose : bool - boolean indicating if verbose output should be written to the - terminal (default is False) - - Returns - ------- - success : bool - boolean indicating if the stage differences are less than htol. 
- - """ - try: - import flopy - except: - msg = "flopy not available - cannot use compare_stages" - raise ValueError(msg) - - # list of valid extensions - valid_ext = ["stg"] - - # Get info for first stage file - sfpth1 = None - if namefile1 is not None: - for ext in valid_ext: - stg = get_entries_from_namefile(namefile1, extension=ext) - sfpth = stg[0][0] - if sfpth is not None: - sfpth1 = sfpth - break - elif files1 is not None: - if isinstance(files1, str): - files1 = [files1] - for file in files1: - for ext in valid_ext: - if ext in os.path.basename(file).lower(): - sfpth1 = file - break - - # Get info for second stage file - sfpth2 = None - if namefile2 is not None: - for ext in valid_ext: - stg = get_entries_from_namefile(namefile2, extension=ext) - sfpth = stg[0][0] - if sfpth is not None: - sfpth2 = sfpth - break - elif files2 is not None: - if isinstance(files2, str): - files2 = [files2] - for file in files2: - for ext in valid_ext: - if ext in os.path.basename(file).lower(): - sfpth2 = file - break - - # confirm that there are two files to compare - if sfpth1 is None or sfpth2 is None: - print("spth1 or spth2 is None") - print(f"spth1: {sfpth1}") - print(f"spth2: {sfpth2}") - return False - - if not os.path.isfile(sfpth1) or not os.path.isfile(sfpth2): - print("spth1 or spth2 is not a file") - print(f"spth1 isfile: {os.path.isfile(sfpth1)}") - print(f"spth2 isfile: {os.path.isfile(sfpth2)}") - return False - - # Open output file - if outfile is not None: - f = open(outfile, "w") - f.write("Created by pymake.autotest.compare_stages\n") - - # Get stage objects - sobj1 = flopy.utils.SwrStage(sfpth1, verbose=verbose) - sobj2 = flopy.utils.SwrStage(sfpth2, verbose=verbose) - - # get totim - times1 = sobj1.get_times() - - # get kswr, kstp, and kper - kk = sobj1.get_kswrkstpkper() - - line_separator = 15 * "-" - header = ( - f"{' ':>15s} {' ':>15s} {' ':>15s} {'MAXIMUM':>15s}\n" - + f"{'STRESS PERIOD':>15s} " - + f"{'TIME STEP':>15s} " - + f"{'SWR TIME STEP':>15s} " - + f"{'STAGE DIFFERENCE':>15s}\n" - + f"{line_separator:>15s} " - + f"{line_separator:>15s} " - + f"{line_separator:>15s} " - + f"{line_separator:>15s}\n" - ) - - if verbose: - print(f"Comparing results for {len(times1)} times") - - icnt = 0 - # Process stage data - for idx, (kon, time) in enumerate(zip(kk, times1)): - s1 = sobj1.get_data(totim=time) - s2 = sobj2.get_data(totim=time) - - if s1 is None or s2 is None: - continue - - s1 = s1["stage"] - s2 = s2["stage"] - - if difftol: - diffmax, indices = _calculate_difftol(s1, s2, htol) - else: - diffmax, indices = _calculate_diffmax(s1, s2) - - if outfile is not None: - if idx < 1: - f.write(header) - f.write( - f"{kon[2] + 1:15d} " - + f"{kon[1] + 1:15d} " - + f"{kon[0] + 1:15d} " - + f"{diffmax:15.6g}\n" - ) - - if diffmax >= htol: - icnt += 1 - if outfile is not None: - if difftol: - ee = ( - f"Maximum head difference ({diffmax}) -- " - + f"{htol} tolerance exceeded at " - + f"{indices[0].shape[0]} node location(s)" - ) - else: - ee = ( - "Maximum head difference " - + f"({diffmax}) exceeded " - + f"at {indices[0].shape[0]} node location(s):" - ) - e = textwrap.fill( - ee + ":", - width=70, - initial_indent=" ", - subsequent_indent=" ", - ) - f.write(f"{e}\n") - if verbose: - print(ee + f" at time {time}") - e = "" - for itupe in indices: - for ind in itupe: - e += f"{ind + 1} " # convert to one-based - e = textwrap.fill( - e, - width=70, - initial_indent=" ", - subsequent_indent=" ", - ) - f.write(f"{e}\n") - # Write header again, unless it is the last record - if idx + 
1 < len(times1): - f.write(f"\n{header}") - - # Close output file - if outfile is not None: - f.close() - - # test for failure - success = True - if icnt > 0: - success = False - return success - - -def _calculate_diffmax(v1, v2): - """Calculate the maximum difference between two vectors. - - Parameters - ---------- - v1 : numpy.ndarray - array of base model results - v2 : numpy.ndarray - array of comparison model results - - Returns - ------- - diffmax : float - absolute value of the maximum difference in v1 and v2 array values - indices : numpy.ndarray - indices where the absolute value of the difference is equal to the - absolute value of the maximum difference. - - """ - if v1.ndim > 1 or v2.ndim > 1: - v1 = v1.flatten() - v2 = v2.flatten() - if v1.size != v2.size: - err = ( - f"Error: _calculate_diffmax v1 size ({v1.size}) " - + f"is not equal to v2 size ({v2.size})" - ) - raise Exception(err) - - diff = abs(v1 - v2) - diffmax = diff.max() - return diffmax, np.where(diff == diffmax) - - -def _calculate_difftol(v1, v2, tol): - """Calculate the difference between two arrays relative to a tolerance. - - Parameters - ---------- - v1 : numpy.ndarray - array of base model results - v2 : numpy.ndarray - array of comparison model results - tol : float - tolerance used to evaluate base and comparison models - - Returns - ------- - diffmax : float - absolute value of the maximum difference in v1 and v2 array values - indices : numpy.ndarray - indices where the absolute value of the difference exceeds the - specified tolerance. - - """ - if v1.ndim > 1 or v2.ndim > 1: - v1 = v1.flatten() - v2 = v2.flatten() - if v1.size != v2.size: - err = ( - f"Error: _calculate_difftol v1 size ({v1.size}) " - + f"is not equal to v2 size ({v2.size})" - ) - raise Exception(err) - - diff = abs(v1 - v2) - return diff.max(), np.where(diff > tol) - - -def compare( - namefile1, - namefile2, - precision="auto", - max_cumpd=0.01, - max_incpd=0.01, - htol=0.001, - outfile1=None, - outfile2=None, - files1=None, - files2=None, -): - """Compare the budget and head results for two MODFLOW-based model - simulations. - - Parameters - ---------- - namefile1 : str - namefile path for base model - namefile2 : str - namefile path for comparison model - precision : str - precision for binary head file ("auto", "single", or "double") - default is "auto" - max_cumpd : float - maximum percent discrepancy allowed for cumulative budget terms - (default is 0.01) - max_incpd : float - maximum percent discrepancy allowed for incremental budget terms - (default is 0.01) - htol : float - maximum allowed head difference (default is 0.001) - outfile1 : str - budget comparison output file name. If outfile1 is None, no budget - comparison output is saved. (default is None) - outfile2 : str - head comparison output file name. If outfile2 is None, no head - comparison output is saved. (default is None) - files1 : str - base model output file. If files1 is not None, results - will be extracted from files1 and namefile1 will not be used. - (default is None) - files2 : str - comparison model output file. If files2 is not None, results - will be extracted from files2 and namefile2 will not be used. - (default is None) - - Returns - ------- - success : bool - boolean indicating if the budget and head differences are less than - max_cumpd, max_incpd, and htol. 
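- - Examples - -------- - A minimal usage sketch; the namefile paths below are hypothetical: - - >>> success = compare( - ... "temp/base/model.nam", - ... "temp/regression/model.nam", - ... outfile1="budget.compare.out", - ... outfile2="head.compare.out", - ... ) - >>> assert success, "budget or head results differ" 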
- - """ - - # Compare budgets from the list files in namefile1 and namefile2 - success1 = compare_budget( - namefile1, - namefile2, - max_cumpd=max_cumpd, - max_incpd=max_incpd, - outfile=outfile1, - files1=files1, - files2=files2, - ) - success2 = compare_heads( - namefile1, - namefile2, - precision=precision, - htol=htol, - outfile=outfile2, - files1=files1, - files2=files2, - ) - success = False - if success1 and success2: - success = True - return success diff --git a/modflow_devtools/utilities/__init__.py b/modflow_devtools/utilities/__init__.py deleted file mode 100644 index 143f486..0000000 --- a/modflow_devtools/utilities/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# __init__.py diff --git a/modflow_devtools/utilities/binary_file_writer.py b/modflow_devtools/utilities/binary_file_writer.py deleted file mode 100644 index 5d8771a..0000000 --- a/modflow_devtools/utilities/binary_file_writer.py +++ /dev/null @@ -1,202 +0,0 @@ -import numpy as np - - -def write_head( - fbin, - data, - kstp=1, - kper=1, - pertim=1.0, - totim=1.0, - text=" HEAD", - ilay=1, -): - dt = np.dtype( - [ - ("kstp", np.int32), - ("kper", np.int32), - ("pertim", np.float64), - ("totim", np.float64), - ("text", "S16"), - ("ncol", np.int32), - ("nrow", np.int32), - ("ilay", np.int32), - ] - ) - nrow = data.shape[0] - ncol = data.shape[1] - h = np.array((kstp, kper, pertim, totim, text, ncol, nrow, ilay), dtype=dt) - h.tofile(fbin) - data.tofile(fbin) - return - - -def write_budget( - fbin, - data, - kstp=1, - kper=1, - text=" FLOW-JA-FACE", - imeth=1, - delt=1.0, - pertim=1.0, - totim=1.0, - text1id1=" GWF-1", - text2id1=" GWF-1", - text1id2=" GWF-1", - text2id2=" NPF", -): - dt = np.dtype( - [ - ("kstp", np.int32), - ("kper", np.int32), - ("text", "S16"), - ("ndim1", np.int32), - ("ndim2", np.int32), - ("ndim3", np.int32), - ("imeth", np.int32), - ("delt", np.float64), - ("pertim", np.float64), - ("totim", np.float64), - ] - ) - - if imeth == 1: - ndim1 = data.shape[0] - ndim2 = 1 - ndim3 = -1 - h = np.array( - ( - kstp, - kper, - text, - ndim1, - ndim2, - ndim3, - imeth, - delt, - pertim, - totim, - ), - dtype=dt, - ) - h.tofile(fbin) - data.tofile(fbin) - - elif imeth == 6: - ndim1 = 1 - ndim2 = 1 - ndim3 = -1 - h = np.array( - ( - kstp, - kper, - text, - ndim1, - ndim2, - ndim3, - imeth, - delt, - pertim, - totim, - ), - dtype=dt, - ) - h.tofile(fbin) - - # write text1id1, ... 
- dt = np.dtype( - [ - ("text1id1", "S16"), - ("text1id2", "S16"), - ("text2id1", "S16"), - ("text2id2", "S16"), - ] - ) - h = np.array((text1id1, text1id2, text2id1, text2id2), dtype=dt) - h.tofile(fbin) - - # write ndat (number of floating point columns) - colnames = data.dtype.names - ndat = len(colnames) - 2 - dt = np.dtype([("ndat", np.int32)]) - h = np.array([(ndat,)], dtype=dt) - h.tofile(fbin) - - # write auxiliary column names - naux = ndat - 1 - if naux > 0: - auxtxt = [f"{colname:16}" for colname in colnames[3:]] - auxtxt = tuple(auxtxt) - dt = np.dtype([(colname, "S16") for colname in colnames[3:]]) - h = np.array(auxtxt, dtype=dt) - h.tofile(fbin) - - # write nlist - nlist = data.shape[0] - dt = np.dtype([("nlist", np.int32)]) - h = np.array([(nlist,)], dtype=dt) - h.tofile(fbin) - - # write the data - data.tofile(fbin) - - pass - else: - raise Exception(f"unknown method code {imeth}") - return - - -def uniform_flow_field(qx, qy, qz, shape, delr=None, delc=None, delv=None): - - nlay, nrow, ncol = shape - - # create spdis array for the uniform flow field - dt = np.dtype( - [ - ("ID1", np.int32), - ("ID2", np.int32), - ("FLOW", np.float64), - ("QX", np.float64), - ("QY", np.float64), - ("QZ", np.float64), - ] - ) - spdis = np.array( - [(id1, id1, 0.0, qx, qy, qz) for id1 in range(nlay * nrow * ncol)], - dtype=dt, - ) - - # create the flowja array for the uniform flow field (assume top-bot = 1) - flowja = [] - if delr is None: - delr = 1.0 - if delc is None: - delc = 1.0 - if delv is None: - delv = 1.0 - for k in range(nlay): - for i in range(nrow): - for j in range(ncol): - # diagonal - flowja.append(0.0) - # up - if k > 0: - flowja.append(-qz * delr * delc) - # back - if i > 0: - flowja.append(-qy * delr * delv) - # left - if j > 0: - flowja.append(qx * delc * delv) - # right - if j < ncol - 1: - flowja.append(-qx * delc * delv) - # front - if i < nrow - 1: - flowja.append(qy * delr * delv) - # bottom - if k < nlay - 1: - flowja.append(qz * delr * delc) - flowja = np.array(flowja, dtype=np.float64) - return spdis, flowja diff --git a/modflow_devtools/utilities/cross_section.py b/modflow_devtools/utilities/cross_section.py deleted file mode 100644 index 57a9759..0000000 --- a/modflow_devtools/utilities/cross_section.py +++ /dev/null @@ -1,276 +0,0 @@ -import numpy as np - -# power for Manning's hydraulic radius term -mpow = 2.0 / 3.0 - - -def calculate_rectchan_mannings_discharge( - conversion_factor, roughness, slope, width, depth -): - """ - Calculate Manning's discharge for a rectangular channel. 
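- - Discharge is computed as Q = conversion_factor * area * depth**(2/3) - * slope**0.5 / roughness, with area = width * depth; this is Manning's - equation under the wide-channel approximation that the hydraulic radius - equals the flow depth. 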
- - """ - area = width * depth - return conversion_factor * area * depth**mpow * slope**0.5 / roughness - - -# n-point cross-section functions -def get_wetted_station( - x0, - x1, - h0, - h1, - depth, -): - """Get the wetted length in the x-direction""" - # -- calculate the minimum and maximum depth - hmin = min(h0, h1) - hmax = max(h0, h1) - - # -- if depth is less than or equal to the minimum value the - # station length (xlen) is zero - if depth <= hmin: - x1 = x0 - # -- if depth is between hmin and hmax, station length is less - # than h1 - h0 - elif depth < hmax: - xlen = x1 - x0 - dlen = h1 - h0 - if abs(dlen) > 0.0: - slope = xlen / dlen - else: - slope = 0.0 - if h0 > h1: - dx = (depth - h1) * slope - xt = x1 + dx - xt0 = xt - xt1 = x1 - else: - dx = (depth - h0) * slope - xt = x0 + dx - xt0 = x0 - xt1 = xt - x0 = xt0 - x1 = xt1 - return x0, x1 - - -def get_wetted_perimeter( - x0, - x1, - h0, - h1, - depth, -): - # -- calculate the minimum and maximum depth - hmin = min(h0, h1) - hmax = max(h0, h1) - - # -- calculate the wetted perimeter for the segment - xlen = x1 - x0 - if xlen > 0.0: - if depth > hmax: - dlen = hmax - hmin - else: - dlen = depth - hmin - else: - if depth > hmin: - dlen = min(depth, hmax) - hmin - else: - dlen = 0.0 - return np.sqrt(xlen**2.0 + dlen**2.0) - - -def get_wetted_area(x0, x1, h0, h1, depth): - # -- calculate the minimum and maximum depth - hmin = min(h0, h1) - hmax = max(h0, h1) - - # -- calculate the wetted area for the segment - xlen = x1 - x0 - area = 0.0 - if xlen > 0.0: - # -- add the area above hmax - if depth > hmax: - area = xlen * (depth - hmax) - # -- add the area below zmax - if hmax != hmin and depth > hmin: - area += 0.5 * (depth - hmin) - return area - - -def wetted_area( - x, - h, - depth, - verbose=False, -): - area = 0.0 - if x.shape[0] == 1: - area = x[0] * depth - else: - for idx in range(0, x.shape[0] - 1): - x0, x1 = x[idx], x[idx + 1] - h0, h1 = h[idx], h[idx + 1] - - # get station data - x0, x1 = get_wetted_station(x0, x1, h0, h1, depth) - - # get wetted area - a = get_wetted_area(x0, x1, h0, h1, depth) - area += a - - # write to screen - if verbose: - print( - f"{idx}->{idx + 1} ({x0},{x1}) - " - f"perimeter={x1 - x0} - area={a}" - ) - - return area - - -def wetted_perimeter( - x, - h, - depth, - verbose=False, -): - perimeter = 0.0 - if x.shape[0] == 1: - perimeter = x[0] - else: - for idx in range(0, x.shape[0] - 1): - x0, x1 = x[idx], x[idx + 1] - h0, h1 = h[idx], h[idx + 1] - - # get station data - x0, x1 = get_wetted_station(x0, x1, h0, h1, depth) - - # get wetted perimeter - perimeter += get_wetted_perimeter(x0, x1, h0, h1, depth) - - # write to screen - if verbose: - print(f"{idx}->{idx + 1} ({x0},{x1}) - perimeter={x1 - x0}") - - return perimeter - - -def manningsq( - x, - h, - depth, - roughness=0.01, - slope=0.001, - conv=1.0, -): - if isinstance(roughness, float): - roughness = np.ones(x.shape, dtype=float) * roughness - if x.shape[0] > 1: - q = 0.0 - for i0 in range(x.shape[0] - 1): - i1 = i0 + 1 - perimeter = get_wetted_perimeter(x[i0], x[i1], h[i0], h[i1], depth) - area = get_wetted_area(x[i0], x[i1], h[i0], h[i1], depth) - if perimeter > 0.0: - radius = area / perimeter - q += ( - conv * area * radius**mpow * slope**0.5 / roughness[i0] - ) - else: - perimeter = wetted_perimeter(x, h, depth) - area = wetted_area(x, h, depth) - radius = 0.0 - if perimeter > 0.0: - radius = area / perimeter - q = conv * area * radius**mpow * slope**0.5 / roughness[0] - return q - - -def get_depths( - flows, - x, - h, - 
roughness=0.01, - slope=0.001, - conv=1.0, - dd=1e-4, - verbose=False, -): - if isinstance(flows, float): - flows = np.array([flows], dtype=float) - if isinstance(roughness, float): - roughness = np.ones(x.shape, dtype=float) * roughness - depths = np.zeros(flows.shape, dtype=float) - for idx, q in enumerate(flows): - depths[idx] = qtodepth( - x, - h, - q, - roughness=roughness, - slope=slope, - conv=conv, - dd=dd, - verbose=False, - ) - - return depths - - -def qtodepth( - x, - h, - q, - roughness=0.01, - slope=0.001, - conv=1.0, - dd=1e-4, - verbose=False, -): - h0 = 0.0 - q0 = manningsq( - x, - h, - h0, - roughness=roughness, - slope=slope, - conv=conv, - ) - r = q0 - q - - iter = 0 - if verbose: - print(f"iteration {iter:>2d} - residual={r}") - while abs(r) > 1e-12: - q1 = manningsq( - x, - h, - h0 + dd, - roughness=roughness, - slope=slope, - conv=conv, - ) - dq = q1 - q0 - if dq != 0.0: - derv = dd / (q1 - q0) - else: - derv = 0.0 - h0 -= derv * r - q0 = manningsq( - x, - h, - h0, - roughness=roughness, - slope=slope, - conv=conv, - ) - r = q0 - q - - iter += 1 - if verbose: - print(f"iteration {iter:>2d} - residual={r}") - if iter > 100: - break - return h0 diff --git a/modflow_devtools/utilities/disu_util.py b/modflow_devtools/utilities/disu_util.py deleted file mode 100644 index 041e728..0000000 --- a/modflow_devtools/utilities/disu_util.py +++ /dev/null @@ -1,101 +0,0 @@ -import numpy as np - - -def get_disu_kwargs(nlay, nrow, ncol, delr, delc, tp, botm): - """ - Simple utility for creating args needed to construct - a disu package - - """ - - def get_nn(k, i, j): - return k * nrow * ncol + i * ncol + j - - nodes = nlay * nrow * ncol - iac = np.zeros((nodes), dtype=int) - ja = [] - area = np.zeros((nodes), dtype=float) - top = np.zeros((nodes), dtype=float) - bot = np.zeros((nodes), dtype=float) - ihc = [] - cl12 = [] - hwva = [] - for k in range(nlay): - for i in range(nrow): - for j in range(ncol): - # diagonal - n = get_nn(k, i, j) - ja.append(n) - iac[n] += 1 - area[n] = delr[i] * delc[j] - ihc.append(n + 1) - cl12.append(n + 1) - hwva.append(n + 1) - if k == 0: - top[n] = tp - else: - top[n] = botm[k - 1] - bot[n] = botm[k] - # up - if k > 0: - ja.append(get_nn(k - 1, i, j)) - iac[n] += 1 - ihc.append(0) - dz = botm[k - 1] - botm[k] - cl12.append(0.5 * dz) - hwva.append(delr[i] * delc[j]) - # back - if i > 0: - ja.append(get_nn(k, i - 1, j)) - iac[n] += 1 - ihc.append(1) - cl12.append(0.5 * delc[i]) - hwva.append(delr[j]) - # left - if j > 0: - ja.append(get_nn(k, i, j - 1)) - iac[n] += 1 - ihc.append(1) - cl12.append(0.5 * delr[j]) - hwva.append(delc[i]) - # right - if j < ncol - 1: - ja.append(get_nn(k, i, j + 1)) - iac[n] += 1 - ihc.append(1) - cl12.append(0.5 * delr[j]) - hwva.append(delc[i]) - # front - if i < nrow - 1: - ja.append(get_nn(k, i + 1, j)) - iac[n] += 1 - ihc.append(1) - cl12.append(0.5 * delc[i]) - hwva.append(delr[j]) - # bottom - if k < nlay - 1: - ja.append(get_nn(k + 1, i, j)) - iac[n] += 1 - ihc.append(0) - if k == 0: - dz = tp - botm[k] - else: - dz = botm[k - 1] - botm[k] - cl12.append(0.5 * dz) - hwva.append(delr[i] * delc[j]) - ja = np.array(ja, dtype=int) - nja = ja.shape[0] - hwva = np.array(hwva, dtype=float) - kw = {} - kw["nodes"] = nodes - kw["nja"] = nja - kw["nvert"] = None - kw["top"] = top - kw["bot"] = bot - kw["area"] = area - kw["iac"] = iac - kw["ja"] = ja - kw["ihc"] = ihc - kw["cl12"] = cl12 - kw["hwva"] = hwva - return kw diff --git a/modflow_devtools/utilities/mftest_exe.py b/modflow_devtools/utilities/mftest_exe.py deleted file 
mode 100644 index 32646c2..0000000 --- a/modflow_devtools/utilities/mftest_exe.py +++ /dev/null @@ -1,222 +0,0 @@ -import json -import os -import pathlib -import shutil -import subprocess -import sys -from contextlib import contextmanager - -from .download import download_and_unzip, getmfexes -from .usgsprograms import usgs_program_data - - -class MFTestExe: - """update and/or verify regression executables for test""" - - def __init__( - self, - releasebin: str = None, - builtbin: str = None, - targets: object = None, - ): - """MFTestExe init""" - - self._releasebin = releasebin - self._builtbin = builtbin - self._targets = targets - self._working_dir = os.path.abspath( - os.path.join(os.path.dirname(__file__), "temp") - ) - - def verify_exe(self): - """ - verify downloaded and built exe exist - """ - if not ( - os.path.isdir(self._releasebin) or os.path.isdir(self._builtbin) - ): - return False - - for t in self._targets.release_exe_names(): - if not os.path.isfile(os.path.join(self._releasebin, t)): - return False - - for t in self._targets.release_lib_names(): - if not os.path.isfile(os.path.join(self._releasebin, t)): - return False - - for t in self._targets.regression_exe_names(): - if not os.path.isfile(os.path.join(self._builtbin, t)): - return False - - for t in self._targets.regression_lib_names(): - if not os.path.isfile(os.path.join(self._builtbin, t)): - return False - - return True - - def releases_current(self): - """ - check downloaded versions against local db versions - """ - try: - with open(os.path.join(self._releasebin, "code.json")) as fh: - release_d = json.load(fh) - except: - return False - - program_d = usgs_program_data.get_program_dict() - exe_d = self._targets.target_exe_d() - if release_d and program_d: - for t in exe_d: - if t in release_d: - key = t - elif exe_d[t]["exe"] in release_d: - key = exe_d[t]["exe"] - if ( - key not in release_d - or release_d[key]["version"] != program_d[key]["version"] - ): - return False - - return True - - return False - - def download_releases(self): - """ - download mf released exe and copy to bin path - """ - self._download_exes() - - def build_mf6_release(self): - """ - download mf6 release source and build exe - """ - self._build_mf6_release() - - def cleanup(self): - """ - remove bins when possible - """ - shutil.rmtree(self._builtbin, ignore_errors=True) - shutil.rmtree(self._releasebin, ignore_errors=True) - - def _create_dirs(self): - pths = [self._releasebin, self._working_dir] - for pth in pths: - print(f"creating... {os.path.abspath(pth)}") - os.makedirs(pth, exist_ok=True) - errmsg = f"could not create... 
{os.path.abspath(pth)}" - assert os.path.exists(pth), errmsg - - def _download_exes(self): - self._create_dirs() - mfexe_pth = os.path.join(self._working_dir, "mfexes") - getmfexes(mfexe_pth, verify=False) - for target in os.listdir(mfexe_pth): - srcpth = os.path.join(mfexe_pth, target) - if os.path.isfile(srcpth): - dstpth = os.path.join(self._releasebin, target) - print(f"copying {srcpth} -> {dstpth}") - shutil.copy(srcpth, dstpth) - - @contextmanager - def _set_directory(self, path: str): - origin = os.path.abspath(os.getcwd()) - path = os.path.abspath(path) - try: - os.chdir(path) - print(f"change from {origin} -> {path}") - yield - finally: - os.chdir(origin) - print(f"change from {path} -> {origin}") - - def _set_compiler_environment_variable(self): - fc = None - - # parse command line arguments - for idx, arg in enumerate(sys.argv): - if arg.lower() == "-fc": - fc = sys.argv[idx + 1] - elif arg.lower().startswith("-fc="): - fc = arg.split("=")[1] - - # determine if fc needs to be set to the FC environmental variable - env_var = os.getenv("FC", default="gfortran") - if fc is None and fc != env_var: - fc = env_var - - # validate Fortran compiler - fc_options = ( - "gfortran", - "ifort", - ) - if fc not in fc_options: - raise ValueError( - f"Fortran compiler {fc} not supported. Fortran compile must be " - + f"[{', '.join(str(value) for value in fc_options)}]." - ) - - # set FC environment variable - os.environ["FC"] = fc - - def _meson_build( - self, - dir_path: str = "..", - libdir: str = "bin", - ): - self._set_compiler_environment_variable() - is_windows = sys.platform.lower() == "win32" - with self._set_directory(dir_path): - cmd = ( - "meson setup builddir " - + f"--bindir={os.path.abspath(libdir)} " - + f"--libdir={os.path.abspath(libdir)} " - + "--prefix=" - ) - if is_windows: - cmd += "%CD%" - else: - cmd += "$(pwd)" - if pathlib.Path("builddir").is_dir(): - cmd += " --wipe" - print(f"setup meson\nrunning...\n {cmd}") - subprocess.run(cmd, shell=True, check=True) - - cmd = "meson install -C builddir" - print(f"build and install with meson\nrunning...\n {cmd}") - subprocess.run(cmd, shell=True, check=True) - - def _build_mf6_release(self): - target_dict = usgs_program_data.get_target("mf6") - - download_and_unzip( - target_dict["url"], - pth=self._working_dir, - verbose=True, - ) - - # update IDEVELOP MODE in the release - srcpth = os.path.join( - self._working_dir, target_dict["dirname"], target_dict["srcdir"] - ) - fpth = os.path.join(srcpth, "Utilities", "version.f90") - with open(fpth) as f: - lines = f.read().splitlines() - assert len(lines) > 0, f"could not update {srcpth}" - - f = open(fpth, "w") - for line in lines: - tag = "IDEVELOPMODE = 0" - if tag in line: - line = line.replace(tag, "IDEVELOPMODE = 1") - f.write(f"{line}\n") - f.close() - - # build release source files with Meson - root_path = os.path.join(self._working_dir, target_dict["dirname"]) - self._meson_build( - dir_path=root_path, libdir=os.path.abspath(self._builtbin) - ) diff --git a/modflow_devtools/utilities/usgsprograms.py b/modflow_devtools/utilities/usgsprograms.py deleted file mode 100644 index dde395b..0000000 --- a/modflow_devtools/utilities/usgsprograms.py +++ /dev/null @@ -1,533 +0,0 @@ -"""Utility functions to extract information for a target from the USGS -application database. Available functionality includes: - -1. Get a list of available targets -2. Get data for a specific target -3. Get a dictionary with the data for all targets -4. Get the current version of a target -5. 
Get a list indicating if single and double precision versions of the - target application should be built -6. Functions to load, update, and export a USGS-style "code.json" json file - containing information in the USGS application database - -A table listing the available pymake targets is included below: - -.. csv-table:: Available pymake targets - :file: ./usgsprograms.txt - :widths: 10, 10, 10, 20, 10, 10, 10, 10, 10 - :header-rows: 1 - -""" -import datetime -import json -import os -import sys - -from modflow_devtools.utilities.download import _request_header - - -class dotdict(dict): - """dot.notation access to dictionary attributes.""" - - __getattr__ = dict.get - __setattr__ = dict.__setitem__ - __delattr__ = dict.__delitem__ - - -# data file containing the USGS program data -program_data_file = "usgsprograms.txt" - -# keys to create for each target -target_keys = ( - "version", - "current", - "url", - "dirname", - "srcdir", - "standard_switch", - "double_switch", - "shared_object", - "url_download_asset_date", -) - - -def _str_to_bool(s): - """Convert "True" and "False" strings to a boolean. - - Parameters - ---------- - s : str - String representation of boolean - - Returns - ------- - - """ - if s == "True": - return True - elif s == "False": - return False - else: - msg = f'Invalid string passed - "{s}"' - raise ValueError(msg) - - -class usgs_program_data: - """USGS program database class.""" - - def __init__(self): - """USGS program database init.""" - self._program_dict = self._build_usgs_database() - - def _build_usgs_database(self): - """Build the USGS program database. - - Returns - ------- - - """ - # pth = os.path.dirname(os.path.abspath(pymake.__file__)) - pth = os.path.dirname(os.path.abspath(__file__)) - fpth = os.path.join(pth, program_data_file) - url_in = open(fpth, "r").read().split("\n") - - program_data = {} - for line in url_in[1:]: - # skip blank lines - if len(line.strip()) < 1: - continue - # parse comma separated line - t = [item.strip() for item in line.split(sep=",")] - # programmatically build a dictionary for each target - d = {} - for idx, key in enumerate(target_keys): - if key in ("url_download_asset_date",): - value = None - else: - value = t[idx + 1] - if key in ( - "current", - "standard_switch", - "double_switch", - "shared_object", - ): - value = _str_to_bool(value) - d[key] = value - - # make it possible to access each key with a dot (.) - d = dotdict(d) - program_data[t[0]] = d - - return dotdict(program_data) - - def _target_data(self, key): - """Get the dictionary for the target key. - - Parameters - ---------- - key : str - Program key (name) - - Returns - ------- - return : dict - dictionary with attributes for program key (name) - - """ - if key not in self._program_dict: - msg = f'"{key}" key does not exist. Available keys: ' - for idx, k in enumerate(self._program_dict.keys()): - if idx > 0: - msg += ", " - msg += f'"{k}"' - raise KeyError(msg) - return self._program_dict[key] - - def _target_keys(self, current=False): - """Get the target keys. - - Parameters - ---------- - current : bool - boolean indicating if only current program versions should be - returned. (default is False) - - Returns - ------- - keys : list - list containing program keys (names) - - """ - if current: - keys = [ - key - for key in self._program_dict.keys() - if self._program_dict[key].current - ] - else: - keys = list(self._program_dict.keys()) - return keys - - @staticmethod - def get_target(key): - """Get the dictionary for a specified target. 
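- - Any leading path and a trailing executable or shared-library extension - (".exe", ".dll", ".so", or ".dylib") are stripped from the key before - the database lookup. 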
- - Parameters - ---------- - key : str - Target USGS program that may have a path and an extension - - Returns - ------- - program_dict : dict - Dictionary with USGS program attributes for the specified key - - """ - # remove path and extension from key - key = os.path.basename(key) - if ( - key.endswith(".exe") - or key.endswith(".dll") - or key.endswith(".so") - or key.endswith(".dylib") - ): - key = os.path.splitext(key)[0] - - # return program attributes - return usgs_program_data()._target_data(key) - - @staticmethod - def get_keys(current=False): - """Get target keys from the USGS program database. - - Parameters - ---------- - current : bool - If False, all USGS program targets are listed. If True, - only USGS program targets that are defined as current are - listed. Default is False. - - Returns - ------- - keys : list - list of USGS program targets - - """ - - return usgs_program_data()._target_keys(current=current) - - @staticmethod - def get_program_dict(): - """Get the complete USGS program database. - - Returns - ------- - program_dict : dict - Dictionary with USGS program attributes for all targets - - """ - return usgs_program_data()._program_dict - - @staticmethod - def get_precision(key): - """Get the precision of a specified target. - - Parameters - ---------- - key : str - Target USGS program - - Returns - ------- - precision : list - list of booleans; False indicates a standard (single) precision - build and True a double precision build - - """ - target = usgs_program_data().get_target(key) - precision = [] - if target.standard_switch: - precision.append(False) - if target.double_switch: - precision.append(True) - return precision - - @staticmethod - def get_version(key): - """Get the current version of the specified target. - - Parameters - ---------- - key : str - Target USGS program - - Returns - ------- - version : str - current version of the specified target - - """ - target = usgs_program_data().get_target(key) - return target.version - - @staticmethod - def list_targets(current=False): - """Print a list of the available USGS program targets. - - Parameters - ---------- - current : bool - If False, all USGS program targets are listed. If True, - only USGS program targets that are defined as current are - listed. Default is False. - - Returns - ------- - - """ - targets = usgs_program_data()._target_keys(current=current) - targets.sort() - msg = "Available targets:\n" - for idx, target in enumerate(targets): - msg += f" {idx + 1:02d} {target}\n" - print(msg) - - return - - @staticmethod - def export_json( - fpth="code.json", - prog_data=None, - current=False, - update=True, - write_markdown=False, - verbose=False, - ): - """Export USGS program data as a json file. - - Parameters - ---------- - fpth : str - Path for the json file to be created. Default is "code.json" - prog_data : dict - User-specified program database. If prog_data is None, it will - be created from the USGS program database - current : bool - If False, all USGS program targets are listed. If True, - only USGS program targets that are defined as current are - listed. Default is False. - update : bool - If True, existing targets in the user-specified program database - will be updated with values from the USGS program database. If - False, existing targets in the user-specified program database - will not be updated. Default is True. - write_markdown : bool - If True, write markdown file that includes the target name, - version, and the last-modified date of the download asset (url). - Default is False. 
- verbose : bool - boolean for verbose output to terminal - - - Returns - ------- - - """ - # print a message - sel = "all of the" - if prog_data is not None: - sel = "select" - elif current: - sel = "the current" - print( - f'writing a json file ("{fpth}") ' - + f"of {sel} USGS programs\n" - + f'in the "{program_data_file}" database.' - ) - if prog_data is not None: - for idx, key in enumerate(prog_data.keys()): - print(f" {idx + 1:>2d}: {key}") - print("\n") - - # get usgs program data - udata = usgs_program_data.get_program_dict() - - # process the program data - if prog_data is None: - if current: - tdict = {} - for key, value in udata.items(): - if value.current: - tdict[key] = value - prog_data = tdict - # replace existing keys in prog_data with values from - # same key in usgs_program_data - else: - if update: - ukeys = usgs_program_data.get_keys() - pkeys = list(prog_data.keys()) - for key in pkeys: - if key in ukeys: - prog_data[key] = udata[key] - - # update the date of each asset if standard code.json object - for target, target_dict in prog_data.items(): - if "url" in target_dict.keys(): - url = target_dict["url"] - header = _request_header(url, verbose=verbose) - keys = list(header.headers.keys()) - for key in ("Last-Modified", "Date"): - if key in keys: - url_date = header.headers[key] - url_data_obj = datetime.datetime.strptime( - url_date, "%a, %d %b %Y %H:%M:%S %Z" - ) - datetime_obj_utc = url_data_obj.replace( - tzinfo=datetime.timezone.utc - ) - datetime_str = datetime_obj_utc.strftime("%m/%d/%Y") - prog_data[target][ - "url_download_asset_date" - ] = datetime_str - break - - # export file - try: - with open(fpth, "w") as f: - json.dump(prog_data, f, indent=4) - except: - msg = f'could not export json file "{fpth}"' - raise IOError(msg) - - # export code.json to --appdir directory, if the - # command line argument was specified. Only done if not CI - appdir = "." - for idx, argv in enumerate(sys.argv): - if argv in ("--appdir", "-ad"): - appdir = sys.argv[idx + 1] - - # make appdir if it does not already exist - if not os.path.isdir(appdir): - os.makedirs(appdir) - - # write code.json - if appdir != ".": - dst = os.path.join(appdir, fpth) - with open(dst, "w") as f: - json.dump(prog_data, f, indent=4) - - # write code.md - if prog_data is not None and write_markdown: - file_obj = open("code.md", "w") - line = "| Program | Version | UTC Date |" - file_obj.write(line + "\n") - line = "| ------- | ------- | ---- |" - file_obj.write(line + "\n") - for target, target_dict in prog_data.items(): - keys = list(target_dict.keys()) - line = f"| {target} | {target_dict['version']} |" - date_key = "url_download_asset_date" - if date_key in keys: - line += f" {target_dict[date_key]} |" - else: - line += " |" - line += "\n" - file_obj.write(line) - file_obj.close() - - return - - @staticmethod - def load_json(fpth="code.json"): - """Load an existing code json file. Basic error checking is done to - make sure the file contains the correct keys. - - Parameters - ---------- - fpth : str - Path for the json file to be loaded. 
Default is "code.json" - - Returns - ------- - json_dict : dict - Valid USGS program database - - """ - try: - with open(fpth, "r") as f: - json_dict = json.load(f) - for key, value in json_dict.items(): - json_dict[key] = dotdict(value) - except: - json_dict = None - - # check that the json file has valid keys - msg = f'invalid json format in "{fpth}"' - if json_dict is not None: - for key, value in json_dict.items(): - try: - for kk in value.keys(): - if kk not in target_keys: - raise KeyError(msg + f' - key ("{kk}")') - except: - raise KeyError(msg) - - return json_dict - - @staticmethod - def list_json(fpth="code.json"): - """List an existing code json file. - - Parameters - ---------- - fpth : str - Path for the json file to be listed. Default is "code.json" - - Returns - ------- - - """ - json_dict = usgs_program_data.load_json(fpth) - - if json_dict is not None: - print(f'Data in "{fpth}"') - for key, value in json_dict.items(): - print(f" target: {key}") - for kkey, vvalue in value.items(): - print(f" {kkey}: {vvalue}") - else: - msg = f'could not load json file "{fpth}".' - raise IOError(msg) - - # print continuation line - print("\n") - - return - - @staticmethod - def update_json(fpth="code.json", temp_dict=None): - """UPDATE an existing code json file. - - Parameters - ---------- - fpth : str - Path for the json file to be listed. Default is "code.json" - - temp_dict : dict - Dictionary with USGS program data for a target - - Returns - ------- - - """ - if temp_dict is not None: - if os.path.isfile(fpth): - json_dict = usgs_program_data.load_json(fpth=fpth) - if json_dict is not None: - for key, value in temp_dict.items(): - if key not in list(json_dict.keys()): - json_dict[key] = value - temp_dict = json_dict - usgs_program_data.export_json(fpth, prog_data=temp_dict) - - return diff --git a/modflow_devtools/utilities/usgsprograms.txt b/modflow_devtools/utilities/usgsprograms.txt deleted file mode 100644 index c0c5072..0000000 --- a/modflow_devtools/utilities/usgsprograms.txt +++ /dev/null @@ -1,25 +0,0 @@ -target , version, current, url , dirname , srcdir , standard_switch, double_switch, shared_object -mf6 , 6.3.0 , True , https://github.com/MODFLOW-USGS/modflow6/releases/download/6.3.0/mf6.3.0_linux.zip , mf6.3.0_linux , src , True , False , False -zbud6 , 6.3.0 , True , https://github.com/MODFLOW-USGS/modflow6/releases/download/6.3.0/mf6.3.0_linux.zip , mf6.3.0_linux , utils/zonebudget/src, True , False , False -libmf6 , 6.3.0 , True , https://github.com/MODFLOW-USGS/modflow6/releases/download/6.3.0/mf6.3.0_linux.zip , mf6.3.0_linux , srcbmi , True , False , True -mp7 , 7.2.001, True , https://water.usgs.gov/water-resources/software/MODPATH/modpath_7_2_001.zip , modpath_7_2_001 , source , True , False , False -mt3dms , 5.3.0 , True , https://hydro.geo.ua.edu/mt3d/mt3dms_530.exe , mt3dms5.3.0 , src/true-binary , True , False , False -mt3dusgs , 1.1.0 , True , https://water.usgs.gov/water-resources/software/MT3D-USGS/mt3dusgs1.1.0.zip , mt3dusgs1.1.0 , src , True , False , False -vs2dt , 3.3 , True , https://water.usgs.gov/water-resources/software/VS2DI/vs2dt3_3.zip , vs2dt3_3 , include , True , False , False -triangle , 1.6 , True , https://www.netlib.org/voronoi/triangle.zip , triangle1.6 , src , True , False , False -gridgen , 1.0.02 , True , https://water.usgs.gov/water-resources/software/GRIDGEN/gridgen.1.0.02.zip , gridgen.1.0.02 , src , True , False , False -crt , 1.3.1 , True , https://water.usgs.gov/ogw/CRT/CRT_1.3.1.zip , CRT_1.3.1 , SOURCE , True , False , False 
-gsflow , 2.2.0 , True , https://water.usgs.gov/water-resources/software/gsflow/gsflow_2.2.0_linux.zip , gsflow_2.2.0_linux , src , True , False , False -sutra , 3.0 , True , https://water.usgs.gov/water-resources/software/sutra/SUTRA_3_0_0.zip , SutraSuite , SUTRA_3_0/source , True , False , False -mf2000 , 1.19.01, True , https://water.usgs.gov/nrp/gwsoftware/modflow2000/mf2k1_19_01.tar.gz , mf2k.1_19 , src , True , False , False -mf2005 , 1.12.00, True , https://github.com/MODFLOW-USGS/mf2005/releases/download/v.1.12.00/MF2005.1_12u.zip , MF2005.1_12u , src , True , True , False -mf2005.1.11, 1.11.00, False , https://water.usgs.gov/ogw/modflow/archive-mf2005/MODFLOW-2005_v1.11.00/mf2005v1_11_00_unix.zip, Unix , src , True , False , False -mfusg , 1.5 , True , https://water.usgs.gov/water-resources/software/MODFLOW-USG/mfusg1_5.zip , mfusg1_5 , src , True , True , False -zonbudusg , 1.5 , True , https://water.usgs.gov/water-resources/software/MODFLOW-USG/mfusg1_5.zip , mfusg1_5 , src/zonebudusg , True , False , False -swtv4 , 4.00.05, True , https://water.usgs.gov/water-resources/software/SEAWAT/swt_v4_00_05.zip , swt_v4_00_05 , source , False , True , False -mp6 , 6.0.1 , True , https://water.usgs.gov/water-resources/software/MODPATH/modpath.6_0_01.zip , modpath.6_0 , src , True , False , False -mflgr , 2.0.0 , True , https://water.usgs.gov/ogw/modflow-lgr/modflow-lgr-v2.0.0/mflgrv2_0_00.zip , mflgr.2_0 , src , True , True , False -zonbud3 , 3.01 , True , https://water.usgs.gov/water-resources/software/ZONEBUDGET/zonbud3_01.exe , Zonbud.3_01 , Src , True , False , False -mfnwt1.1.4 , 1.1.4 , False , https://water.usgs.gov/water-resources/software/MODFLOW-NWT/MODFLOW-NWT_1.1.4.zip , MODFLOW-NWT_1.1.4 , src , True , False , False -mfnwt , 1.2.0 , True , https://water.usgs.gov/water-resources/software/MODFLOW-NWT/MODFLOW-NWT_1.2.0.zip , MODFLOW-NWT_1.2.0 , src , True , True , False -prms , 5.2.1 , True , https://water.usgs.gov/water-resources/software/PRMS/prms_5.2.1_linux.zip , prms_5.2.1_linux , src , True , False , False diff --git a/modflow_devtools/zip.py b/modflow_devtools/zip.py new file mode 100644 index 0000000..4590327 --- /dev/null +++ b/modflow_devtools/zip.py @@ -0,0 +1,183 @@ +import os +from zipfile import ZIP_DEFLATED, ZipFile, ZipInfo + + +class MFZipFile(ZipFile): + """ + ZipFile modified to preserve file attributes. + https://stackoverflow.com/questions/39296101/python-zipfile-removes-execute-permissions-from-binaries + """ + + def extract(self, member, path=None, pwd=None): + """ + + Parameters + ---------- + member : str + individual file to extract. If member does not exist, all files + are extracted. + path : str + directory path to extract file in a zip file (default is None, + which results in files being extracted in the current directory) + pwd : str + zip file password (default is None) + + Returns + ------- + ret_val : int + return value indicating status of file extraction + + """ + if not isinstance(member, ZipInfo): + member = self.getinfo(member) + + if path is None: + path = os.getcwd() + + ret_val = self._extract_member(member, str(path), pwd) + attr = member.external_attr >> 16 + if attr != 0: + os.chmod(ret_val, attr) + + return ret_val + + def extractall(self, path=None, members=None, pwd=None): + """Extract all files in the zipfile. 
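+ + Each member is routed through MFZipFile.extract, so Unix permission + bits stored in a member's external attributes are restored on the + extracted files (the standard ZipFile.extractall drops them). 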
+ + Parameters + ---------- + path : str + directory path to extract files in a zip file (default is None, + which results in files being extracted in the current directory) + members : list of str + individual files to extract (default is None, which extracts + all members) + pwd : str + zip file password (default is None) + + Returns + ------- + + """ + if members is None: + members = self.namelist() + + if path is None: + path = os.getcwd() + else: + if hasattr(os, "fspath"): + # introduced in python 3.6 and above + path = os.fspath(str(path)) + + for zipinfo in members: + self.extract(zipinfo, str(path), pwd) + + @staticmethod + def compressall(path, file_pths=None, dir_pths=None, patterns=None): + """Compress selected files or files in selected directories. + + Parameters + ---------- + path : str + output zip file path + file_pths : str or list of str + file paths to include in the output zip file (default is None) + dir_pths : str or list of str + directory paths to include in the output zip file (default is None) + patterns : str or list of str + file patterns to include in the output zip file (default is None) + + Returns + ------- + success : bool + boolean indicating if the output zip file was created + + """ + + # create an empty list + if file_pths is None: + file_pths = [] + # convert files to a list + else: + if isinstance(file_pths, str): + file_pths = [file_pths] + elif isinstance(file_pths, tuple): + file_pths = list(file_pths) + + # remove directories from the file list + if len(file_pths) > 0: + file_pths = [e for e in file_pths if os.path.isfile(e)] + + # convert dirs to a list if a str (a tuple is allowed) + if dir_pths is None: + dir_pths = [] + else: + if isinstance(dir_pths, str): + dir_pths = [dir_pths] + + # convert patterns to a list if a str (a tuple is allowed) + if patterns is not None: + if isinstance(patterns, str): + patterns = [patterns] + + # walk through dirs and add files to the list + for dir_pth in dir_pths: + for dirname, subdirs, files in os.walk(dir_pth): + for filename in files: + fpth = os.path.join(dirname, filename) + # add the file if it does not exist in file_pths + if fpth not in file_pths: + file_pths.append(fpth) + + # remove file_pths that do not match the patterns + if patterns is not None: + tlist = [] + for file_pth in file_pths: + if any(p in os.path.basename(file_pth) for p in patterns): + tlist.append(file_pth) + file_pths = tlist + + # write the zipfile + success = True + if len(file_pths) > 0: + zf = ZipFile(path, "w", ZIP_DEFLATED) + + # write files to zip file + for file_pth in file_pths: + arcname = os.path.basename(file_pth) + zf.write(file_pth, arcname=arcname) + + # close the zip file + zf.close() + else: + msg = "No files to add to the zip file" + print(msg) + success = False + + return success + + +def zip_all(path, file_pths=None, dir_pths=None, patterns=None): + """Compress all files in the user-provided list of file paths and directory + paths that match the provided file patterns. 
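+ + This is a thin convenience wrapper around MFZipFile.compressall, which + performs the actual filtering and compression. 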
+ + Parameters + ---------- + path : str + path of the zip file that will be created + file_pths : str or list + file path or list of file paths to be compressed + dir_pths : str or list + directory path or list of directory paths to search for files that + will be compressed + patterns : str or list + file pattern or list of file patterns to match when creating the + list of files that will be compressed + + Returns + ------- + success : bool + boolean indicating if the output zip file was created + + """ + return MFZipFile.compressall( + path, file_pths=file_pths, dir_pths=dir_pths, patterns=patterns + ) diff --git a/pyproject.toml b/pyproject.toml index 71e753e..a9a9ec6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,8 +16,11 @@ verbose = true [tool.isort] profile = "black" -src_paths = ["modflow_devtools", "autotest", ] +src_paths = ["src/modflow_devtools"] line_length = 79 [tool.setuptools_scm] -fallback_version = "999" \ No newline at end of file +fallback_version = "999" + +[tool.pytest.ini_options] +addopts = ["--import-mode=importlib"] \ No newline at end of file diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..2e0fa86 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,8 @@ +[pytest] +addopts = -ra +python_files = + test_*.py + *_test*.py +markers = + slow: tests that don't complete in a few seconds + meta: run by other tests (e.g. testing fixtures) \ No newline at end of file diff --git a/scripts/pull_request_prepare.py b/scripts/pull_request_prepare.py new file mode 100644 index 0000000..315500a --- /dev/null +++ b/scripts/pull_request_prepare.py @@ -0,0 +1,21 @@ +import os + +try: + import isort + + print(f"isort version: {isort.__version__}") +except ModuleNotFoundError: + print("isort not installed\n\tInstall using pip install isort") + +try: + import black + + print(f"black version: {black.__version__}") +except ModuleNotFoundError: + print("black not installed\n\tInstall using pip install black") + +print("running isort...") +os.system("isort -v .") + +print("running black...") +os.system("black -v .") diff --git a/setup.cfg b/setup.cfg index d9c6f92..84084ef 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = modflow-devtools -version = attr: modflow_devtools.config.__version__ +version = 0.0.1 description = modflow-devtools is a Python package containing tools for MODFLOW development. 
long_description = file: README.md, LICENSE.md long_description_content_type = text/markdown @@ -16,7 +16,16 @@ classifiers = Development Status :: 5 - Production/Stable Intended Audience :: Science/Research License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication + Operating System :: Microsoft :: Windows + Operating System :: POSIX + Operating System :: Unix + Operating System :: MacOS Programming Language :: Python + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 Programming Language :: Python :: 3 :: Only Topic :: Scientific/Engineering :: Hydrology url = https://github.com/MODFLOW-USGS/modflow-devtools @@ -24,7 +33,7 @@ download_url = https://pypi.org/project/modflow-devtools project_urls = Documentation = https://mfpymake.readthedocs.io Bug Tracker = https://github.com/MODFLOW-USGS/modflow-devtools/issues - Source Code = https://github.com/MODFLOW-USGS/modflow-devtools/pymake + Source Code = https://github.com/MODFLOW-USGS/modflow-devtools [options] include_package_data = True # includes files listed in MANIFEST.in @@ -33,11 +42,23 @@ packages = find: python_requires = >=3.7 install_requires = numpy - requests - flopy + pytest -[options.package_data] -modflow_devtools = utilities/usgsprograms.txt +[options.extras_require] +lint = + black + cffconvert + flake8 + isort + pylint +test = + %(lint)s + coverage + flaky + pytest-cases + pytest-cov + pytest-dotenv + pytest-xdist [flake8] exclude =