From 186b2d5f00ac0096b2487e85179f4e0524a70799 Mon Sep 17 00:00:00 2001 From: "Jessica S. Yu" <15913767+jessicasyu@users.noreply.github.com> Date: Wed, 18 Sep 2024 17:08:40 -0400 Subject: [PATCH] Add unit tests for arcade collection convert tasks (#80) * Bump scikit-image (0.19.3 -> 0.24.0) * Add Python 3.11 to workflows * Add Python 3.11 to tox * Start adding unit tests and documentation for convert to simularium task * Finish adding unit tests and docstrings for convert to simularium tasks * Fix linting issues * Move build tar instance to test utilities module * Add docstrings and unit tests for convert to contours task * Add docstrings and unit tests for convert to images task * Remove test return values * Add docstrings and unit tests for convert to meshes task * Rename to locations tar for clarity * Add ignore to pyproject * Apply linting suggestions for convert to meshes task * Update convert to images task to support TFE format * Add docstrings and unit tests for convert to colorizer task * Rename colorizer to TFE * Add docstrings to convert to projection task --- .github/workflows/build.yml | 5 +- .github/workflows/documentation.yml | 3 +- .github/workflows/lint.yml | 3 +- poetry.lock | 154 ++- pyproject.toml | 9 +- src/arcade_collection/convert/__init__.py | 4 +- .../convert/convert_to_colorizer.py | 77 -- .../convert/convert_to_contours.py | 46 +- .../convert/convert_to_images.py | 163 ++- .../convert/convert_to_meshes.py | 243 +++- .../convert/convert_to_projection.py | 72 +- .../convert/convert_to_simularium.py | 273 ++++- .../convert/convert_to_simularium_objects.py | 79 +- .../convert/convert_to_simularium_shapes.py | 313 ++++- .../convert/convert_to_tfe.py | 145 +++ tests/arcade_collection/convert/__init__.py | 0 .../convert/test_convert_to_contours.py | 336 ++++++ .../convert/test_convert_to_images.py | 1034 +++++++++++++++++ .../convert/test_convert_to_meshes.py | 432 +++++++ .../convert/test_convert_to_simularium.py | 386 ++++++ 
.../test_convert_to_simularium_objects.py | 150 ++- .../test_convert_to_simularium_shapes.py | 775 +++++++++++- .../convert/test_convert_to_tfe.py | 72 ++ tests/arcade_collection/convert/utilities.py | 16 + 24 files changed, 4422 insertions(+), 368 deletions(-) delete mode 100644 src/arcade_collection/convert/convert_to_colorizer.py create mode 100644 src/arcade_collection/convert/convert_to_tfe.py create mode 100644 tests/arcade_collection/convert/__init__.py create mode 100644 tests/arcade_collection/convert/test_convert_to_contours.py create mode 100644 tests/arcade_collection/convert/test_convert_to_images.py create mode 100644 tests/arcade_collection/convert/test_convert_to_meshes.py create mode 100644 tests/arcade_collection/convert/test_convert_to_simularium.py create mode 100644 tests/arcade_collection/convert/test_convert_to_tfe.py create mode 100644 tests/arcade_collection/convert/utilities.py diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f416425..a337c72 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,7 @@ jobs: strategy: matrix: - python-version: ["3.9", "3.10"] + python-version: ["3.9", "3.10", "3.11"] steps: @@ -16,6 +16,7 @@ jobs: uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} + id: setup-python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -31,7 +32,7 @@ jobs: uses: actions/cache@v4 with: path: .venv - key: ${{ runner.os }}-${{ hashFiles('**/poetry.lock') }} + key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} - name: Install dependencies if: steps.cached-dependencies.outputs.cache-hit != 'true' diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index d2e7be8..981b947 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -15,6 +15,7 @@ jobs: uses: actions/checkout@v4 - name: Set up 
Python 3.9 + id: setup-python uses: actions/setup-python@v5 with: python-version: 3.9 @@ -30,7 +31,7 @@ jobs: uses: actions/cache@v4 with: path: .venv - key: ${{ runner.os }}-${{ hashFiles('**/poetry.lock') }} + key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} - name: Install dependencies if: steps.cached-dependencies.outputs.cache-hit != 'true' diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b4eac0f..8c184d4 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -12,6 +12,7 @@ jobs: uses: actions/checkout@v4 - name: Set up Python 3.9 + id: setup-python uses: actions/setup-python@v5 with: python-version: 3.9 @@ -27,7 +28,7 @@ jobs: uses: actions/cache@v4 with: path: .venv - key: ${{ runner.os }}-${{ hashFiles('**/poetry.lock') }} + key: ${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} - name: Install dependencies if: steps.cached-dependencies.outputs.cache-hit != 'true' diff --git a/poetry.lock b/poetry.lock index ab81e69..143383a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1236,13 +1236,13 @@ files = [ [[package]] name = "imageio" -version = "2.25.1" +version = "2.35.1" description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "imageio-2.25.1-py3-none-any.whl", hash = "sha256:5bce7f88eef7ee4e9aac798d3b218fea2e98cbbaa59a3e37b730a7aa5784eeac"}, - {file = "imageio-2.25.1.tar.gz", hash = "sha256:6021d42debd2187e9c781e494a49a30eba002fbac1eef43f491bbc731e7a6d2b"}, + {file = "imageio-2.35.1-py3-none-any.whl", hash = "sha256:6eb2e5244e7a16b85c10b5c2fe0f7bf961b40fcb9f1a9fd1bd1d2c2f8fb3cd65"}, + {file = "imageio-2.35.1.tar.gz", hash = "sha256:4952dfeef3c3947957f6d5dedb1f4ca31c6e509a476891062396834048aeed2a"}, ] [package.dependencies] @@ -1250,20 +1250,21 @@ numpy = "*" pillow = ">=8.3.2" [package.extras] -all-plugins = ["astropy", "av", "imageio-ffmpeg", "opencv-python", "psutil", "tifffile"] +all-plugins = ["astropy", "av", "imageio-ffmpeg", "psutil", "tifffile"] all-plugins-pypy = ["av", "imageio-ffmpeg", "psutil", "tifffile"] build = ["wheel"] -dev = ["black", "flake8", "fsspec[github]", "invoke", "pytest", "pytest-cov"] +dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] ffmpeg = ["imageio-ffmpeg", "psutil"] fits = ["astropy"] -full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "invoke", "itk", "numpydoc", "opencv-python", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "sphinx (<6)", "tifffile", "wheel"] +full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "itk", "numpy (>2)", "numpydoc", "pillow-heif", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "rawpy", "sphinx (<6)", "tifffile", "wheel"] gdal = ["gdal"] itk = ["itk"] linting = ["black", "flake8"] -opencv = ["opencv-python"] +pillow-heif = ["pillow-heif"] pyav = ["av"] -test = ["fsspec[github]", "invoke", "pytest", "pytest-cov"] +rawpy = ["numpy (>2)", "rawpy"] +test = ["fsspec[github]", "pytest", "pytest-cov"] tifffile = ["tifffile"] [[package]] @@ -1568,6 +1569,25 @@ 
websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" [package.extras] adal = ["adal (>=1.0.2)"] +[[package]] +name = "lazy-loader" +version = "0.4" +description = "Makes it easy to load subpackages and functions on demand." +optional = false +python-versions = ">=3.7" +files = [ + {file = "lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc"}, + {file = "lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1"}, +] + +[package.dependencies] +packaging = "*" + +[package.extras] +dev = ["changelist (==0.5)"] +lint = ["pre-commit (==3.7.0)"] +test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] + [[package]] name = "lazy-object-proxy" version = "1.9.0" @@ -2895,43 +2915,6 @@ files = [ {file = "pytzdata-2020.1.tar.gz", hash = "sha256:3efa13b335a00a8de1d345ae41ec78dd11c9f8807f522d39850f2dd828681540"}, ] -[[package]] -name = "PyWavelets" -version = "1.4.1" -description = "PyWavelets, wavelet transform module" -optional = false -python-versions = ">=3.8" -files = [ - {file = "PyWavelets-1.4.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:d854411eb5ee9cb4bc5d0e66e3634aeb8f594210f6a1bed96dbed57ec70f181c"}, - {file = "PyWavelets-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:231b0e0b1cdc1112f4af3c24eea7bf181c418d37922a67670e9bf6cfa2d544d4"}, - {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:754fa5085768227c4f4a26c1e0c78bc509a266d9ebd0eb69a278be7e3ece943c"}, - {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da7b9c006171be1f9ddb12cc6e0d3d703b95f7f43cb5e2c6f5f15d3233fcf202"}, - {file = "PyWavelets-1.4.1-cp310-cp310-win32.whl", hash = "sha256:67a0d28a08909f21400cb09ff62ba94c064882ffd9e3a6b27880a111211d59bd"}, - {file = "PyWavelets-1.4.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:91d3d393cffa634f0e550d88c0e3f217c96cfb9e32781f2960876f1808d9b45b"}, - {file = "PyWavelets-1.4.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:64c6bac6204327321db30b775060fbe8e8642316e6bff17f06b9f34936f88875"}, - {file = "PyWavelets-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f19327f2129fb7977bc59b966b4974dfd72879c093e44a7287500a7032695de"}, - {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad987748f60418d5f4138db89d82ba0cb49b086e0cbb8fd5c3ed4a814cfb705e"}, - {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:875d4d620eee655346e3589a16a73790cf9f8917abba062234439b594e706784"}, - {file = "PyWavelets-1.4.1-cp311-cp311-win32.whl", hash = "sha256:7231461d7a8eb3bdc7aa2d97d9f67ea5a9f8902522818e7e2ead9c2b3408eeb1"}, - {file = "PyWavelets-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:daf0aa79842b571308d7c31a9c43bc99a30b6328e6aea3f50388cd8f69ba7dbc"}, - {file = "PyWavelets-1.4.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:ab7da0a17822cd2f6545626946d3b82d1a8e106afc4b50e3387719ba01c7b966"}, - {file = "PyWavelets-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:578af438a02a86b70f1975b546f68aaaf38f28fb082a61ceb799816049ed18aa"}, - {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb5ca8d11d3f98e89e65796a2125be98424d22e5ada360a0dbabff659fca0fc"}, - {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:058b46434eac4c04dd89aeef6fa39e4b6496a951d78c500b6641fd5b2cc2f9f4"}, - {file = "PyWavelets-1.4.1-cp38-cp38-win32.whl", hash = "sha256:de7cd61a88a982edfec01ea755b0740e94766e00a1ceceeafef3ed4c85c605cd"}, - {file = "PyWavelets-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:7ab8d9db0fe549ab2ee0bea61f614e658dd2df419d5b75fba47baa761e95f8f2"}, - {file = "PyWavelets-1.4.1-cp39-cp39-macosx_10_13_x86_64.whl", hash = 
"sha256:23bafd60350b2b868076d976bdd92f950b3944f119b4754b1d7ff22b7acbf6c6"}, - {file = "PyWavelets-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0e56cd7a53aed3cceca91a04d62feb3a0aca6725b1912d29546c26f6ea90426"}, - {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:030670a213ee8fefa56f6387b0c8e7d970c7f7ad6850dc048bd7c89364771b9b"}, - {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71ab30f51ee4470741bb55fc6b197b4a2b612232e30f6ac069106f0156342356"}, - {file = "PyWavelets-1.4.1-cp39-cp39-win32.whl", hash = "sha256:47cac4fa25bed76a45bc781a293c26ac63e8eaae9eb8f9be961758d22b58649c"}, - {file = "PyWavelets-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:88aa5449e109d8f5e7f0adef85f7f73b1ab086102865be64421a3a3d02d277f4"}, - {file = "PyWavelets-1.4.1.tar.gz", hash = "sha256:6437af3ddf083118c26d8f97ab43b0724b956c9f958e9ea788659f6a2834ba93"}, -] - -[package.dependencies] -numpy = ">=1.17.3" - [[package]] name = "pywin32" version = "305" @@ -3306,54 +3289,51 @@ files = [ [[package]] name = "scikit-image" -version = "0.19.3" +version = "0.24.0" description = "Image processing in Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "scikit-image-0.19.3.tar.gz", hash = "sha256:24b5367de1762da6ee126dd8f30cc4e7efda474e0d7d70685433f0e3aa2ec450"}, - {file = "scikit_image-0.19.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:3a01372ae4bca223873304b0bff79b9d92446ac6d6177f73d89b45561e2d09d8"}, - {file = "scikit_image-0.19.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fdf48d9b1f13af69e4e2c78e05067e322e9c8c97463c315cd0ecb47a94e259fc"}, - {file = "scikit_image-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b6a8f98f2ac9bb73706461fd1dec875f6a5141759ed526850a5a49e90003d19"}, - {file = "scikit_image-0.19.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:cfbb073f23deb48e0e60c47f8741d8089121d89cc78629ea8c5b51096efc5be7"}, - {file = "scikit_image-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:cc24177de3fdceca5d04807ad9c87d665f0bf01032ed94a9055cd1ed2b3f33e9"}, - {file = "scikit_image-0.19.3-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:fd9dd3994bb6f9f7a35f228323f3c4dc44b3cf2ff15fd72d895216e9333550c6"}, - {file = "scikit_image-0.19.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ad5d8000207a264d1a55681a9276e6a739d3f05cf4429004ad00d61d1892235f"}, - {file = "scikit_image-0.19.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:84baa3179f3ae983c3a5d81c1e404bc92dcf7daeb41bfe9369badcda3fb22b92"}, - {file = "scikit_image-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f9f8a1387afc6c70f2bed007c3854a2d7489f9f7713c242f16f32ee05934bc2"}, - {file = "scikit_image-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:9fb0923a3bfa99457c5e17888f27b3b8a83a3600b4fef317992e7b7234764732"}, - {file = "scikit_image-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:ce3d2207f253b8eb2c824e30d145a9f07a34a14212d57f3beca9f7e03c383cbe"}, - {file = "scikit_image-0.19.3-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:2a02d1bd0e2b53e36b952bd5fd6118d9ccc3ee51de35705d63d8eb1f2e86adef"}, - {file = "scikit_image-0.19.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:03779a7e1736fdf89d83c0ba67d44110496edd736a3bfce61a2b5177a1c8a099"}, - {file = "scikit_image-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19a21a101a20c587a3b611a2cf6f86c35aae9f8d9563279b987e83ee1c9a9790"}, - {file = "scikit_image-0.19.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f50b923f8099c1045fcde7418d86b206c87e333e43da980f41d8577b9605245"}, - {file = "scikit_image-0.19.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e207c6ce5ce121d7d9b9d2b61b9adca57d1abed112c902d8ffbfdc20fb42c12b"}, - {file = 
"scikit_image-0.19.3-cp38-cp38-win32.whl", hash = "sha256:a7c3985c68bfe05f7571167ee021d14f5b8d1a4a250c91f0b13be7fb07e6af34"}, - {file = "scikit_image-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:651de1c2ce1fbee834753b46b8e7d81cb12a5594898babba63ac82b30ddad49d"}, - {file = "scikit_image-0.19.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:8d8917fcf85b987b1f287f823f3a1a7dac38b70aaca759bc0200f3bc292d5ced"}, - {file = "scikit_image-0.19.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:0b0a199157ce8487c77de4fde0edc0b42d6d42818881c11f459262351d678b2d"}, - {file = "scikit_image-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33dfd463ee6cc509defa279b963829f2230c9e0639ccd3931045be055878eea6"}, - {file = "scikit_image-0.19.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a8714348ddd671f819457a797c97d4c672166f093def66d66c3254cbd1d43f83"}, - {file = "scikit_image-0.19.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff3b1025356508d41f4fe48528e509d95f9e4015e90cf158cd58c56dc63e0ac5"}, - {file = "scikit_image-0.19.3-cp39-cp39-win32.whl", hash = "sha256:9439e5294de3f18d6e82ec8eee2c46590231cf9c690da80545e83a0733b7a69e"}, - {file = "scikit_image-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:32fb88cc36203b99c9672fb972c9ef98635deaa5fc889fe969f3e11c44f22919"}, + {file = "scikit_image-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb3bc0264b6ab30b43c4179ee6156bc18b4861e78bb329dd8d16537b7bbf827a"}, + {file = "scikit_image-0.24.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9c7a52e20cdd760738da38564ba1fed7942b623c0317489af1a598a8dedf088b"}, + {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93f46e6ce42e5409f4d09ce1b0c7f80dd7e4373bcec635b6348b63e3c886eac8"}, + {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:39ee0af13435c57351a3397eb379e72164ff85161923eec0c38849fecf1b4764"}, + {file = "scikit_image-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:7ac7913b028b8aa780ffae85922894a69e33d1c0bf270ea1774f382fe8bf95e7"}, + {file = "scikit_image-0.24.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:272909e02a59cea3ed4aa03739bb88df2625daa809f633f40b5053cf09241831"}, + {file = "scikit_image-0.24.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:190ebde80b4470fe8838764b9b15f232a964f1a20391663e31008d76f0c696f7"}, + {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59c98cc695005faf2b79904e4663796c977af22586ddf1b12d6af2fa22842dc2"}, + {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa27b3a0dbad807b966b8db2d78da734cb812ca4787f7fbb143764800ce2fa9c"}, + {file = "scikit_image-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:dacf591ac0c272a111181afad4b788a27fe70d213cfddd631d151cbc34f8ca2c"}, + {file = "scikit_image-0.24.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6fccceb54c9574590abcddc8caf6cefa57c13b5b8b4260ab3ff88ad8f3c252b3"}, + {file = "scikit_image-0.24.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ccc01e4760d655aab7601c1ba7aa4ddd8b46f494ac46ec9c268df6f33ccddf4c"}, + {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18836a18d3a7b6aca5376a2d805f0045826bc6c9fc85331659c33b4813e0b563"}, + {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8579bda9c3f78cb3b3ed8b9425213c53a25fa7e994b7ac01f2440b395babf660"}, + {file = "scikit_image-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:82ab903afa60b2da1da2e6f0c8c65e7c8868c60a869464c41971da929b3e82bc"}, + {file = "scikit_image-0.24.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef04360eda372ee5cd60aebe9be91258639c86ae2ea24093fb9182118008d009"}, + {file = 
"scikit_image-0.24.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e9aadb442360a7e76f0c5c9d105f79a83d6df0e01e431bd1d5757e2c5871a1f3"}, + {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e37de6f4c1abcf794e13c258dc9b7d385d5be868441de11c180363824192ff7"}, + {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4688c18bd7ec33c08d7bf0fd19549be246d90d5f2c1d795a89986629af0a1e83"}, + {file = "scikit_image-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:56dab751d20b25d5d3985e95c9b4e975f55573554bd76b0aedf5875217c93e69"}, + {file = "scikit_image-0.24.0.tar.gz", hash = "sha256:5d16efe95da8edbeb363e0c4157b99becbd650a60b77f6e3af5768b66cf007ab"}, ] [package.dependencies] -imageio = ">=2.4.1" -networkx = ">=2.2" -numpy = ">=1.17.0" -packaging = ">=20.0" -pillow = ">=6.1.0,<7.1.0 || >7.1.0,<7.1.1 || >7.1.1,<8.3.0 || >8.3.0" -PyWavelets = ">=1.1.1" -scipy = ">=1.4.1" -tifffile = ">=2019.7.26" +imageio = ">=2.33" +lazy-loader = ">=0.4" +networkx = ">=2.8" +numpy = ">=1.23" +packaging = ">=21" +pillow = ">=9.1" +scipy = ">=1.9" +tifffile = ">=2022.8.12" [package.extras] -data = ["pooch (>=1.3.0)"] -docs = ["cloudpickle (>=0.2.1)", "dask[array] (>=0.15.0,!=2.17.0)", "ipywidgets", "kaleido", "matplotlib (>=3.3)", "myst-parser", "numpydoc (>=1.0)", "pandas (>=0.23.0)", "plotly (>=4.14.0)", "pooch (>=1.3.0)", "pytest-runner", "scikit-learn", "seaborn (>=0.7.1)", "sphinx (>=1.8)", "sphinx-copybutton", "sphinx-gallery (>=0.10.1)", "tifffile (>=2020.5.30)"] -optional = ["SimpleITK", "astropy (>=3.1.2)", "cloudpickle (>=0.2.1)", "dask[array] (>=1.0.0,!=2.17.0)", "matplotlib (>=3.0.3)", "pooch (>=1.3.0)", "pyamg", "qtpy"] -test = ["asv", "codecov", "flake8", "matplotlib (>=3.0.3)", "pooch (>=1.3.0)", "pytest (>=5.2.0)", "pytest-cov (>=2.7.0)", "pytest-faulthandler", "pytest-localserver"] +build = ["Cython (>=3.0.4)", "build", "meson-python (>=0.15)", "ninja", "numpy (>=2.0.0rc1)", 
"packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.8)", "wheel"] +data = ["pooch (>=1.6.0)"] +developer = ["ipython", "pre-commit", "tomli"] +docs = ["PyWavelets (>=1.1.1)", "dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.6)", "myst-parser", "numpydoc (>=1.7)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.15.2)", "pytest-doctestplus", "pytest-runner", "scikit-learn (>=1.1)", "seaborn (>=0.11)", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-gallery (>=0.14)", "sphinx_design (>=0.5)", "tifffile (>=2022.8.12)"] +optional = ["PyWavelets (>=1.1.1)", "SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.6)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=1.1)"] +test = ["asv", "numpydoc (>=1.7)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-doctestplus", "pytest-faulthandler", "pytest-localserver"] [[package]] name = "scipy" @@ -4188,4 +4168,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "8e6eff1c0a4314af558ef74343e572d0b8282dab7620f8ab57c9d1fd11887ca0" +content-hash = "c82316b7a13b93553b3ca72a7378c289b11bfe74e64403351c1b666bcc8ccbdf" diff --git a/pyproject.toml b/pyproject.toml index ca35826..0885dde 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,7 @@ python = "^3.9" prefect = "^2.14.3" numpy = "^1.24.2" pandas = "^1.5.3" -scikit-image = "^0.19.3" +scikit-image = "^0.24.0" simulariumio = "^1.7.0" matplotlib = "^3.7.2" @@ -43,7 +43,7 @@ build-backend = "poetry.core.masonry.api" disable = [ "missing-module-docstring", ] -good-names = ["i", "j", "k", "x", "y", "z", "u", "v", "w", "ds", "dt", "ax"] +good-names = ["i", "j", "k", "x", "y", "z", "u", "v", "w", "ds", "dt", "dx", "dy", "dz", "ax"] [tool.pylint.design] max-args = 10 # maximum number of arguments for function / method @@ -95,6 +95,9 @@ 
max-args = 10 "INP001", # implicit-namespace-package "ANN201", # missing-return-type-undocumented-public-function "S311", # suspicious-non-cryptographic-random-usage + "ANN001", # missing-type-function-argument + "ANN003", # missing-type-kwargs + "ANN202", # missing-type-args ] [tool.coverage.report] @@ -107,7 +110,7 @@ exclude_lines = [ legacy_tox_ini = """ [tox] isolated_build = True -envlist = py{39,310}, format, lint, typecheck +envlist = py{39,310,311}, format, lint, typecheck skipsdist=True [testenv] diff --git a/src/arcade_collection/convert/__init__.py b/src/arcade_collection/convert/__init__.py index 9db0c5b..ee93948 100644 --- a/src/arcade_collection/convert/__init__.py +++ b/src/arcade_collection/convert/__init__.py @@ -2,7 +2,6 @@ from prefect import task -from .convert_to_colorizer import convert_to_colorizer from .convert_to_contours import convert_to_contours from .convert_to_images import convert_to_images from .convert_to_meshes import convert_to_meshes @@ -10,8 +9,8 @@ from .convert_to_simularium import convert_to_simularium from .convert_to_simularium_objects import convert_to_simularium_objects from .convert_to_simularium_shapes import convert_to_simularium_shapes +from .convert_to_tfe import convert_to_tfe -convert_to_colorizer = task(convert_to_colorizer) convert_to_contours = task(convert_to_contours) convert_to_images = task(convert_to_images) convert_to_meshes = task(convert_to_meshes) @@ -19,3 +18,4 @@ convert_to_simularium = task(convert_to_simularium) convert_to_simularium_objects = task(convert_to_simularium_objects) convert_to_simularium_shapes = task(convert_to_simularium_shapes) +convert_to_tfe = task(convert_to_tfe) diff --git a/src/arcade_collection/convert/convert_to_colorizer.py b/src/arcade_collection/convert/convert_to_colorizer.py deleted file mode 100644 index d5d8adb..0000000 --- a/src/arcade_collection/convert/convert_to_colorizer.py +++ /dev/null @@ -1,77 +0,0 @@ -import numpy as np -import pandas as pd - -from 
arcade_collection.output.convert_model_units import convert_model_units - - -def convert_to_colorizer( - all_data: pd.DataFrame, - features: list[str], - frame_spec: tuple[int, int, int], - ds: float, - dt: float, - regions: list[str], -) -> dict: - frames = list(np.arange(*frame_spec)) - manifest = get_manifest_data(features, frames) - - convert_model_units(all_data, ds, dt, regions) - frame_data = all_data[all_data["TICK"].isin(frames)] - - outliers = get_outliers_from_data(frame_data) - tracks = get_tracks_from_data(frame_data) - times = get_times_from_data(frame_data) - - colorizer_json = { - "manifest": manifest, - "outliers": outliers, - "tracks": tracks, - "times": times, - } - - for feature in features: - colorizer_json[feature] = get_feature_from_data(frame_data, feature) - - return colorizer_json - - -def get_manifest_data(features: list[str], frames: list[int]) -> dict: - manifest = { - "frames": [f"frame_{i}.png" for i in range(len(frames))], - "features": { - feature: f"feature_{feature_index}.json" - for feature_index, feature in enumerate(features) - }, - "outliers": "outliers.json", - "tracks": "tracks.json", - "times": "times.json", - } - - return manifest - - -def get_outliers_from_data(data: pd.DataFrame) -> dict: - outliers = [False] * len(data) - outliers_json = {"data": outliers, "min": False, "max": True} - return outliers_json - - -def get_tracks_from_data(data: pd.DataFrame) -> dict: - tracks = data["ID"] - tracks_json = {"data": list(tracks)} - return tracks_json - - -def get_times_from_data(data: pd.DataFrame) -> dict: - times = data["time"] - times_json = {"data": list(times)} - return times_json - - -def get_feature_from_data(data: pd.DataFrame, feature: str) -> dict: - feature_values = data[feature] - feature_min = float(np.nanmin(feature_values)) - feature_max = float(np.nanmax(feature_values)) - - feature_json = {"data": list(feature_values), "min": feature_min, "max": feature_max} - return feature_json diff --git 
a/src/arcade_collection/convert/convert_to_contours.py b/src/arcade_collection/convert/convert_to_contours.py index f3bd0bd..35103be 100644 --- a/src/arcade_collection/convert/convert_to_contours.py +++ b/src/arcade_collection/convert/convert_to_contours.py @@ -11,33 +11,64 @@ "side1": (0, 2, 1), "side2": (2, 1, 0), } +"""Axis rotations for different contour views.""" def convert_to_contours( series_key: str, - data_tar: tarfile.TarFile, + locations_tar: tarfile.TarFile, frame: int, regions: list[str], box: tuple[int, int, int], indices: dict[str, list[int]], ) -> dict[str, dict[str, dict[int, list]]]: - locations = extract_tick_json(data_tar, series_key, frame, "LOCATIONS") + """ + Convert data to iso-valued contours. + + Contours are calculated using "marching squares" method. Note that these + contours follow the iso-values, which means that a single "square" voxel + will produce a diamond-shaped contour. For the exact outline of a set of + voxels, consider using ``extract_voxel_contours`` from the + ``abm_shape_collection`` package. + + Parameters + ---------- + series_key + Simulation series key. + locations_tar + Archive of location data. + frame + Frame number. + regions + List of regions. + box + Size of bounding box. + indices + Map of view to slice indices. + + Returns + ------- + : + Map of region, view, and index to contours. 
+ """ + + locations = extract_tick_json(locations_tar, series_key, frame, "LOCATIONS") contours: dict[str, dict[str, dict[int, list]]] = { - region: {view: {} for view in indices.keys()} for region in regions + region: {view: {} for view in indices} for region in regions } for location in locations: for region in regions: array = np.zeros(box) - voxels = get_location_voxels(location, region) + voxels = get_location_voxels(location, region if region != "DEFAULT" else None) if len(voxels) == 0: continue array[tuple(np.transpose(voxels))] = 1 - for view in indices.keys(): + for view in indices: array_rotated = np.moveaxis(array, [0, 1, 2], ROTATIONS[view]) for index in indices[view]: @@ -49,6 +80,9 @@ def convert_to_contours( if index not in contours[region][view]: contours[region][view][index] = [] - contours[region][view][index].extend(measure.find_contours(array_slice)) + array_contours = [ + contour.tolist() for contour in measure.find_contours(array_slice) + ] + contours[region][view][index].extend(array_contours) return contours diff --git a/src/arcade_collection/convert/convert_to_images.py b/src/arcade_collection/convert/convert_to_images.py index 68381d5..641bb4d 100644 --- a/src/arcade_collection/convert/convert_to_images.py +++ b/src/arcade_collection/convert/convert_to_images.py @@ -1,32 +1,94 @@ -import tarfile -from typing import Optional +from __future__ import annotations + +from enum import Enum +from typing import TYPE_CHECKING import numpy as np from arcade_collection.output.extract_tick_json import extract_tick_json from arcade_collection.output.get_location_voxels import get_location_voxels +if TYPE_CHECKING: + import tarfile + + +class ImageType(Enum): + """Image conversion types.""" + + FULL = (False, False, False, False) + """Image with TCZYX dimensions.""" + + FULL_BINARY = (True, False, False, False) + """Binary image with TCZYX dimensions.""" + + FULL_BY_FRAME = (False, True, False, False) + """Image with TCZYX dimensions separated by 
frame.""" + + FULL_BINARY_BY_FRAME = (True, True, False, False) + """Binary image with TCZYX dimensions separated by frame.""" + + FLAT_BY_FRAME = (False, True, True, False) + """Image array flattened to YX dimensions separated by frame.""" + + FLAT_BINARY_BY_FRAME = (True, True, True, False) + """Binary array flattened to YX dimensions separated by frame.""" + + FLAT_RGBA_BY_FRAME = (False, True, True, True) + """RGBA array flattened to YX dimensions separated by frame .""" + def convert_to_images( series_key: str, - data_tar: tarfile.TarFile, + locations_tar: tarfile.TarFile, frame_spec: tuple[int, int, int], regions: list[str], box: tuple[int, int, int], chunk_size: int, - binary: bool = False, - separate: bool = False, - flatten: bool = False, -) -> list[tuple[int, int, np.ndarray, Optional[int]]]: + image_type: ImageType, +) -> list[tuple[int, int, np.ndarray, int | None]]: + """ + Convert data to image arrays. + + Images are extracted from lists of voxels. The initial converted image has + dimensions in TCZYX order, such that T encodes the specified frames and C + encodes the regions. The initial converted image is then further processed + based on selected image type. + + Parameters + ---------- + series_key + Simulation series key. + locations_tar + Archive of location data. + frame_spec + Specification for image frames. + regions + List of region channels. + box + Size of bounding box. + chunk_size + Size of each image chunk. + image_type + Image conversion type. + + Returns + ------- + : + List of image chunks, chunk indices, and frames. 
+ """ + + binary, separate, _, reindex = image_type.value length, width, height = box frames = list(np.arange(*frame_spec)) - array = np.zeros((len(frames), len(regions), height, width, length), "uint16") + raw_array = np.zeros((len(frames), len(regions), height, width, length), "uint16") + + object_id = 1 for index, frame in enumerate(frames): - locations = extract_tick_json(data_tar, series_key, frame, "LOCATIONS") + locations = extract_tick_json(locations_tar, series_key, frame, "LOCATIONS") for location in locations: - location_id = location["id"] + value = object_id if binary or reindex else location["id"] for channel, region in enumerate(regions): voxels = [ @@ -39,17 +101,17 @@ def convert_to_images( if len(voxels) == 0: continue - array[index, channel][tuple(np.transpose(voxels))] = 1 if binary else location_id + raw_array[index, channel][tuple(np.transpose(voxels))] = value - if separate and flatten: - chunks = [ - (i, j, flatten_array_chunk(chunk), frame) - for index, frame in enumerate(frames) - for i, j, chunk in split_array_chunks(array[[index], :, :, :, :], chunk_size) - ] - elif separate: + if reindex: + object_id = object_id + 1 + + # Remove 1 pixel border. + array = raw_array[:, :, 1:-1, 1:-1, 1:-1].copy() + + if separate: chunks = [ - (i, j, chunk, frame) + (i, j, flatten_array_chunk(chunk, image_type), frame) for index, frame in enumerate(frames) for i, j, chunk in split_array_chunks(array[[index], :, :, :, :], chunk_size) ] @@ -60,18 +122,30 @@ def convert_to_images( def split_array_chunks(array: np.ndarray, chunk_size: int) -> list[tuple[int, int, np.ndarray]]: + """ + Split arrays into smaller chunks. + + Parameters + ---------- + array + Image array (dimensions in TCZYX order). + chunk_size + Size of each image chunk. + + Returns + ------- + : + List of array chunks and their relative indices. + """ + chunks = [] length = array.shape[4] width = array.shape[3] # Calculate chunk splits. 
- length_section = ( - [0, chunk_size + 1] + (int(length / chunk_size) - 2) * [chunk_size] + [chunk_size + 1] - ) + length_section = [0] + (int(length / chunk_size)) * [chunk_size] length_splits = np.array(length_section, dtype=np.int32).cumsum() - width_section = ( - [0, chunk_size + 1] + (int(width / chunk_size) - 2) * [chunk_size] + [chunk_size + 1] - ) + width_section = [0] + (int(width / chunk_size)) * [chunk_size] width_splits = np.array(width_section, dtype=np.int32).cumsum() # Iterate through each chunk split. @@ -92,13 +166,38 @@ def split_array_chunks(array: np.ndarray, chunk_size: int) -> list[tuple[int, in return chunks -def flatten_array_chunk(array: np.ndarray) -> np.ndarray: +def flatten_array_chunk(array: np.ndarray, image_type: ImageType) -> np.ndarray: + """ + Flatten array chunk along z axis. + + When flattening to an RGBA array, each object is encoded as a unique color + such that the object ID = R + G*256 + B*256*256 - 1 and background pixels + are black (R = 0, G = 0, B = 0). + + Parameters + ---------- + array + Image array (dimensions in TCZYX order). + image_type + Image conversion type. + + Returns + ------- + : + Flattened image array. 
+ """ + array_flat = array[0, 0, :, :, :].max(axis=0) - array_rgba = np.zeros((*array_flat.shape, 4), dtype=np.uint8) - array_rgba[:, :, 0] = (array_flat & 0x000000FF) >> 0 - array_rgba[:, :, 1] = (array_flat & 0x0000FF00) >> 8 - array_rgba[:, :, 2] = (array_flat & 0x00FF0000) >> 16 - array_rgba[:, :, 3] = 255 # (array_flat & 0x00FF0000) >> 24 + if image_type == ImageType.FLAT_RGBA_BY_FRAME: + array_rgba = np.zeros((*array_flat.shape, 4), dtype=np.uint8) + array_rgba[:, :, 0] = (array_flat & 0x000000FF) >> 0 + array_rgba[:, :, 1] = (array_flat & 0x0000FF00) >> 8 + array_rgba[:, :, 2] = (array_flat & 0x00FF0000) >> 16 + array_rgba[:, :, 3] = 255 # (array_flat & 0x00FF0000) >> 24 + return array_rgba + + if image_type in (ImageType.FLAT_BY_FRAME, ImageType.FLAT_BINARY_BY_FRAME): + return array_flat - return array_rgba + return array diff --git a/src/arcade_collection/convert/convert_to_meshes.py b/src/arcade_collection/convert/convert_to_meshes.py index 73ecbd4..8ea0055 100644 --- a/src/arcade_collection/convert/convert_to_meshes.py +++ b/src/arcade_collection/convert/convert_to_meshes.py @@ -1,46 +1,88 @@ -import tarfile -from typing import Optional, Union +from __future__ import annotations + +from enum import Enum +from typing import TYPE_CHECKING import numpy as np -import pandas as pd from skimage import measure from arcade_collection.output.extract_tick_json import extract_tick_json from arcade_collection.output.get_location_voxels import get_location_voxels +if TYPE_CHECKING: + import tarfile + + import pandas as pd + +MAX_ARRAY_LEVEL = 7 +"""Maximum array level for conversion to meshes.""" + + +class MeshType(Enum): + """Mesh face types.""" + + DEFAULT = False + """Mesh with default faces.""" + + INVERTED = True + """Mesh with inverted faces.""" + def convert_to_meshes( series_key: str, - data_tar: tarfile.TarFile, + locations_tar: tarfile.TarFile, frame_spec: tuple[int, int, int], regions: list[str], box: tuple[int, int, int], - invert: Union[bool, dict[str, 
bool]] = False, - group_size: Optional[int] = None, - categories: Optional[pd.DataFrame] = None, + mesh_type: MeshType | dict[str, MeshType] = MeshType.DEFAULT, + group_size: int | None = None, + categories: pd.DataFrame | None = None, ) -> list[tuple[int, int, str, str]]: + """ + Convert data to mesh OBJ contents. + + Parameters + ---------- + series_key + Simulation series key. + locations_tar + Archive of location data. + frame_spec + Specification for mesh frames. + regions + List of regions. + box + Size of bounding box. + mesh_type + Mesh face type. + group_size + Number of objects in each group (if grouping meshes). + categories + Simulation data containing ID, FRAME, and CATEGORY. + + Returns + ------- + : + List of mesh frames, indices, regions, and OBJ contents. + """ frames = list(np.arange(*frame_spec)) meshes = [] length, width, height = box - - if group_size is not None: - groups = make_mesh_groups(categories, frames, group_size) - else: - groups = None + groups = make_mesh_groups(categories, frames, group_size) if group_size is not None else None for frame in frames: - locations = extract_tick_json(data_tar, series_key, frame, "LOCATIONS") + locations = extract_tick_json(locations_tar, series_key, frame, "LOCATIONS") for region in regions: - region_invert = invert[region] if isinstance(invert, dict) else invert + region_mesh_type = mesh_type[region] if isinstance(mesh_type, dict) else mesh_type if groups is None: for location in locations: location_id = location["id"] mesh = make_individual_mesh( - location, length, width, height, region, region_invert + location, length, width, height, region, region_mesh_type ) if mesh is None: @@ -53,7 +95,7 @@ def convert_to_meshes( location for location in locations if location["id"] in group ] mesh = make_combined_mesh( - group_locations, length, width, height, region, region_invert + group_locations, length, width, height, region, region_mesh_type ) if mesh is None: @@ -67,6 +109,24 @@ def convert_to_meshes( 
def make_mesh_groups( categories: pd.DataFrame, frames: list[int], group_size: int ) -> dict[int, dict[int, list[int]]]: + """ + Group objects based on group size and categories. + + Parameters + ---------- + categories + Simulation data containing ID, FRAME, and CATEGORY. + frames + List of frames. + group_size + Number of objects in each group. + + Returns + ------- + : + Map of frame to map of index to location ids. + """ + groups: dict[int, dict[int, list[int]]] = {} for frame in frames: @@ -85,8 +145,37 @@ def make_mesh_groups( def make_individual_mesh( - location: dict, length: int, width: int, height: int, region: str, invert: bool -) -> Optional[str]: + location: dict, + length: int, + width: int, + height: int, + region: str, + mesh_type: MeshType = MeshType.DEFAULT, +) -> str | None: + """ + Create mesh containing a single object. + + Parameters + ---------- + location + Location object. + length + Bounding box length. + width + Bounding box width. + height + Bounding box height. + region + Region name. + mesh_type + Mesh face type. + + Returns + ------- + : + Single mesh OBJ file contents. + """ + voxels = [ (x, width - y - 1, z) for x, y, z in get_location_voxels(location, region if region != "DEFAULT" else None) @@ -98,14 +187,41 @@ def make_individual_mesh( center = list(np.array(voxels).mean(axis=0)) array = make_mesh_array(voxels, length, width, height) verts, faces, normals = make_mesh_geometry(array, center) - mesh = make_mesh_file(verts, faces, normals, invert) - - return mesh + return make_mesh_file(verts, faces, normals, mesh_type) def make_combined_mesh( - locations: list[dict], length: int, width: int, height: int, region: str, invert: bool -) -> Optional[str]: + locations: list[dict], + length: int, + width: int, + height: int, + region: str, + mesh_type: MeshType = MeshType.DEFAULT, +) -> str | None: + """ + Create mesh containing multiple objects. + + Parameters + ---------- + locations + List of location objects. 
+ length + Bounding box length. + width + Bounding box width. + height + Bounding box height. + region + Region name. + mesh_type + Mesh face type. + + Returns + ------- + : + Combined mesh OBJ file contents. + """ + meshes = [] offset = 0 @@ -121,7 +237,7 @@ def make_combined_mesh( center = [length / 2, width / 2, height / 2] array = make_mesh_array(voxels, length, width, height) verts, faces, normals = make_mesh_geometry(array, center, offset) - mesh = make_mesh_file(verts, faces, normals, invert) + mesh = make_mesh_file(verts, faces, normals, mesh_type) meshes.append(mesh) offset = offset + len(verts) @@ -134,9 +250,32 @@ def make_combined_mesh( def make_mesh_array( voxels: list[tuple[int, int, int]], length: int, width: int, height: int ) -> np.ndarray: + """ + Generate array from list of voxels. + + Given voxel locations are set to the max array level. The array is smoothed + such that all other locations are set to the number of max-level neighbors. + + Parameters + ---------- + voxels + List of voxels representing object. + length + Bounding box length. + width + Bounding box width. + height + Bounding box height. + + Returns + ------- + : + Array representing object. + """ + # Create array. array = np.zeros((length, width, height), dtype=np.uint8) - array[tuple(np.transpose(voxels))] = 7 + array[tuple(np.transpose(voxels))] = MAX_ARRAY_LEVEL # Get set of zero neighbors for all voxels. offsets = [(-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)] @@ -156,7 +295,9 @@ def make_mesh_array( # Smooth array levels based on neighbor counts. 
for x, y, z in neighbors: - array[x, y, z] = sum(array[x + i, y + j, z + k] == 7 for i, j, k in offsets) + 1 + array[x, y, z] = ( + sum(array[x + i, y + j, z + k] == MAX_ARRAY_LEVEL for i, j, k in offsets) + 1 + ) return array @@ -164,7 +305,26 @@ def make_mesh_array( def make_mesh_geometry( array: np.ndarray, center: list[float], offset: int = 0 ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - verts, faces, normals, _ = measure.marching_cubes(array, level=3, allow_degenerate=False) + """ + Generate mesh from array. + + Parameters + ---------- + array + Array representing object. + center + Coordinate of object center. + offset + Offset for face indices. + + Returns + ------- + : + Arrays of mesh vertices, faces, and normals. + """ + + level = int(MAX_ARRAY_LEVEL / 2) + verts, faces, normals, _ = measure.marching_cubes(array, level=level, allow_degenerate=False) # Center the vertices. verts[:, 0] = verts[:, 0] - center[0] @@ -178,8 +338,33 @@ def make_mesh_geometry( def make_mesh_file( - verts: np.ndarray, faces: np.ndarray, normals: np.ndarray, invert: bool = False + verts: np.ndarray, + faces: np.ndarray, + normals: np.ndarray, + mesh_type: MeshType = MeshType.DEFAULT, ) -> str: + """ + Create mesh OBJ file contents from marching cubes output. + + If the mesh type is INVERTED, the face vertex winding order is flipped. + + Parameters + ---------- + verts + Array of mesh vertices. + faces + Array of mesh faces. + normals + Array of mesh normals. + mesh_type + Mesh face type. + + Returns + ------- + : + Mesh OBJ file. 
+ """ + mesh = "" for item in verts: @@ -189,7 +374,7 @@ def make_mesh_file( mesh += f"vn {item[0]} {item[1]} {item[2]}\n" for item in faces: - if invert: + if mesh_type == MeshType.INVERTED: mesh += f"f {item[0]}//{item[0]} {item[1]}//{item[1]} {item[2]}//{item[2]}\n" else: mesh += f"f {item[2]}//{item[2]} {item[1]}//{item[1]} {item[0]}//{item[0]}\n" diff --git a/src/arcade_collection/convert/convert_to_projection.py b/src/arcade_collection/convert/convert_to_projection.py index c6736c7..0e18a45 100644 --- a/src/arcade_collection/convert/convert_to_projection.py +++ b/src/arcade_collection/convert/convert_to_projection.py @@ -9,7 +9,7 @@ def convert_to_projection( series_key: str, - data_tar: tarfile.TarFile, + locations_tar: tarfile.TarFile, frame: int, regions: list[str], box: tuple[int, int, int], @@ -18,6 +18,36 @@ def convert_to_projection( scale: int, colors: dict[str, str], ) -> mpl.figure.Figure: + """ + Convert data to projection figure. + + Parameters + ---------- + series_key + Simulation series key. + locations_tar + Archive of location data. + frame + Frame number. + regions + List of regions. + box + Size of bounding box. + ds + Spatial scaling in um/voxel. + dt + Temporal scaling in hours/tick. + scale + Size of scale bar (in um). + colors + Map of region to colors. + + Returns + ------- + : + Projection figure. + """ + fig = plt.figure(figsize=(10, 10), constrained_layout=True) length, width, height = box @@ -47,7 +77,7 @@ def convert_to_projection( "side1": list(range(1, width)), "side2": list(range(1, length)), } - contours = convert_to_contours(series_key, data_tar, frame, regions, box, indices) + contours = convert_to_contours(series_key, locations_tar, frame, regions, box, indices) for region in regions: color = colors[region] @@ -73,6 +103,25 @@ def convert_to_projection( def add_frame_timestamp( ax: mpl.axes.Axes, length: int, width: int, dt: float, frame: int, color: str ) -> None: + """ + Add a frame timestamp to figure axes. 
+ + Parameters + ---------- + ax + Axes object. + length + Length of bounding box. + width + Width of bounding box. + dt + Temporal scaling in hours/tick. + frame + Frame number. + color + Timestamp color. + """ + hours, minutes = divmod(round(frame * dt, 2), 1) timestamp = f"{int(hours):02d}H:{round(minutes*60):02d}M" @@ -90,6 +139,25 @@ def add_frame_timestamp( def add_frame_scalebar( ax: mpl.axes.Axes, length: int, width: int, ds: float, scale: int, color: str ) -> None: + """ + Add a frame scalebar to figure axes. + + Parameters + ---------- + ax + Axes object. + length + Length of bounding box. + width + Width of bounding box. + ds + Spatial scaling in um/voxel. + scale + Size of scale bar (in um). + color + Scalebar color. + """ + scalebar = scale / ds ax.add_patch( diff --git a/src/arcade_collection/convert/convert_to_simularium.py b/src/arcade_collection/convert/convert_to_simularium.py index 30ee9f0..6c436e0 100644 --- a/src/arcade_collection/convert/convert_to_simularium.py +++ b/src/arcade_collection/convert/convert_to_simularium.py @@ -1,9 +1,10 @@ +from __future__ import annotations + import itertools import random -from typing import Optional, Union +from typing import TYPE_CHECKING import numpy as np -import pandas as pd from simulariumio import ( DISPLAY_TYPE, AgentData, @@ -16,58 +17,99 @@ TrajectoryData, UnitData, ) +from simulariumio.constants import DEFAULT_CAMERA_SETTINGS, VIZ_TYPE + +if TYPE_CHECKING: + import pandas as pd + CAMERA_POSITIONS: dict[str, tuple[float, float, float]] = { "patch": (0.0, -0.5, 900), "potts": (10.0, 0.0, 200.0), } +"""Default camera positions for different simulation types.""" CAMERA_LOOK_AT: dict[str, tuple[float, float, float]] = { "patch": (0.0, -0.2, 0.0), "potts": (10.0, 0.0, 0.0), } +"""Default camera look at positions for different simulation types.""" def convert_to_simularium( series_key: str, simulation_type: str, data: pd.DataFrame, - length: Union[int, float], - width: Union[int, float], - height: 
Union[int, float], - ds: float, - dz: float, + length: float, + width: float, + height: float, + ds: tuple[float, float, float], dt: float, colors: dict[str, str], - url: Optional[str] = None, + url: str = "", + jitter: float = 1.0, ) -> str: - meta_data = get_meta_data(series_key, simulation_type, length, width, height, ds, dz) + """ + Convert data to Simularium trajectory. + + Parameters + ---------- + series_key + Simulation series key. + simulation_type + Simulation type. + data + Simulation trajectory data. + length + Bounding box length. + width + Bounding box width. + height + Bounding box height. + ds + Spatial scaling in um/voxel. + dt + Temporal scaling in hours/tick. + colors + Color mapping. + url + Url prefix for meshes. + jitter + Jitter applied to colors. + + Returns + ------- + : + Simularium trajectory. + """ + + meta_data = get_meta_data(series_key, simulation_type, length, width, height, *ds) agent_data = get_agent_data(data) - agent_data.display_data = get_display_data(data, colors, url) + agent_data.display_data = get_display_data(data, colors, url, jitter) for index, (frame, group) in enumerate(data.groupby("frame")): n_agents = len(group) agent_data.times[index] = float(frame) * dt agent_data.n_agents[index] = n_agents - agent_data.unique_ids[index][:n_agents] = range(0, n_agents) + agent_data.unique_ids[index][:n_agents] = range(n_agents) agent_data.types[index][:n_agents] = group["name"] agent_data.radii[index][:n_agents] = group["radius"] agent_data.positions[index][:n_agents] = group[["x", "y", "z"]] agent_data.n_subpoints[index][:n_agents] = group["points"].map(len) - agent_data.viz_types[index][:n_agents] = group["points"].map( - lambda points: 1001 if points else 1000 + agent_data.viz_types[index][:n_agents] = group["display"].map( + lambda display: VIZ_TYPE.FIBER if display == "FIBER" else VIZ_TYPE.DEFAULT ) - agent_data.subpoints[index][:n_agents] = np.array( - list(itertools.zip_longest(*group["points"], fillvalue=0)) - ).T + points 
= np.array(list(itertools.zip_longest(*group["points"], fillvalue=0))).T + if len(points) != 0: + agent_data.subpoints[index][:n_agents] = points - agent_data.positions[:, :, 0] = (agent_data.positions[:, :, 0] - length / 2.0) * ds - agent_data.positions[:, :, 1] = (width / 2.0 - agent_data.positions[:, :, 1]) * ds - agent_data.positions[:, :, 2] = (agent_data.positions[:, :, 2] - height / 2.0) * dz + agent_data.positions[:, :, 0] = (agent_data.positions[:, :, 0] - length / 2.0) * ds[0] + agent_data.positions[:, :, 1] = (width / 2.0 - agent_data.positions[:, :, 1]) * ds[1] + agent_data.positions[:, :, 2] = (agent_data.positions[:, :, 2] - height / 2.0) * ds[2] - agent_data.subpoints[:, :, 0::3] = (agent_data.subpoints[:, :, 0::3]) * ds - agent_data.subpoints[:, :, 1::3] = (-agent_data.subpoints[:, :, 1::3]) * ds - agent_data.subpoints[:, :, 2::3] = (agent_data.subpoints[:, :, 2::3]) * dz + agent_data.subpoints[:, :, 0::3] = (agent_data.subpoints[:, :, 0::3]) * ds[0] + agent_data.subpoints[:, :, 1::3] = (-agent_data.subpoints[:, :, 1::3]) * ds[1] + agent_data.subpoints[:, :, 2::3] = (agent_data.subpoints[:, :, 2::3]) * ds[2] return TrajectoryConverter( TrajectoryData( @@ -82,31 +124,88 @@ def convert_to_simularium( def get_meta_data( series_key: str, simulation_type: str, - length: Union[int, float], - width: Union[int, float], - height: Union[int, float], - ds: float, + length: float, + width: float, + height: float, + dx: float, + dy: float, dz: float, ) -> MetaData: - meta_data = MetaData( - box_size=np.array([length * ds, width * ds, height * dz]), + """ + Create MetaData object. + + If the simulation type has defined camera settings, those will be used. + Otherwise, the global camera defaults will be used. + + Parameters + ---------- + series_key + Simulation series key. + simulation_type + Simulation type. + length + Bounding box length. + width + Bounding box width. + height + Bounding box height. + dx + Spatial scaling in the X direction in um/voxel. 
+ dy + Spatial scaling in the Y direction in um/voxel. + dz + Spatial scaling in the Z direction in um/voxel. + + Returns + ------- + : + MetaData object. + """ + + return MetaData( + box_size=np.array([length * dx, width * dy, height * dz]), camera_defaults=CameraData( - position=np.array(CAMERA_POSITIONS[simulation_type]), - look_at_position=np.array(CAMERA_LOOK_AT[simulation_type]), + position=np.array( + CAMERA_POSITIONS.get(simulation_type, DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION) + ), + look_at_position=np.array( + CAMERA_LOOK_AT.get(simulation_type, DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION) + ), fov_degrees=60.0, ), trajectory_title=f"ARCADE - {series_key}", model_meta_data=ModelMetaData( title="ARCADE", version=simulation_type, - description=(f"Agent-based modeling framework ARCADE for {series_key}."), + description=f"Agent-based modeling framework ARCADE for {series_key}.", ), ) - return meta_data - def get_agent_data(data: pd.DataFrame) -> AgentData: + """ + Create empty AgentData object. + + Method uses the "frame", "name", and "points" columns in data to generate + the AgentData object. + + The number of unique entries in the "frame" column determines the total + number of frames dimension. The maximum number of entries in the "name" + column (for a given frame) determines the maximum number of agents + dimension. The maximum number of subpoints is determined by the length of + the longest list in the "points" column (which may be zero). + + Parameters + ---------- + data + Simulation trajectory data. + + Returns + ------- + : + AgentData object. 
+ """ + total_frames = len(data["frame"].unique()) max_agents = data.groupby("frame")["name"].count().max() max_subpoints = data["points"].map(len).max() @@ -114,43 +213,99 @@ def get_agent_data(data: pd.DataFrame) -> AgentData: def get_display_data( - data: pd.DataFrame, colors: dict[str, str], url: Optional[str] = None + data: pd.DataFrame, colors: dict[str, str], url: str = "", jitter: float = 1.0 ) -> DisplayData: + """ + Create map of DisplayData objects. + + Method uses the "name" and "display" columns in data to generate the + DisplayData objects. + + The "name" column should be a string in one of the following forms: + + - ``(index)#(color_key)`` + - ``(group)#(color_key)#(index)`` + - ``(group)#(color_key)#(index)#(frame)`` + + where ``(index)`` becomes DisplayData object name and ``(color_key)`` is + passed to the color mapping to select the DisplayData color (optional color + jitter may be applied). + + The "display" column should be a valid ``DISPLAY_TYPE``. For the + ``DISPLAY_TYPE.OBJ`` type, a URL prefix must be used and names should be in + the form ``(group)#(color_key)#(index)#(frame)``, which is used to generate + the full URL formatted as: ``(url)/(frame)_(group)_(index).MESH.obj``. Note + that ``(frame)`` is zero-padded to six digits and ``(index)`` is zero-padded + to three digits. + + Parameters + ---------- + data + Simulation trajectory data. + colors + Color mapping. + url + Url prefix for meshes. + jitter + Jitter applied to colors. + + Returns + ------- + : + Map of DisplayData objects. 
+ """ + display_data = {} + display_types = sorted(set(zip(data["name"], data["display"]))) - for name in data["name"].unique(): - if name.count("#") == 3: + for name, display_type in display_types: + if name.count("#") == 1: + index, color_key = name.split("#") + elif name.count("#") == 2: # noqa: PLR2004 + _, color_key, index = name.split("#") + elif name.count("#") == 3: # noqa: PLR2004 group, color_key, index, frame = name.split("#") - elif name.count("#") == 2: - group, index, color_key = name.split("#") - random.seed(index) - jitter = (random.random() - 0.5) / 2 - - if url is not None: - display_data[name] = DisplayData( - name=index, - display_type=DISPLAY_TYPE.OBJ, - url=f"{url}/{int(frame):06d}_{group}_{int(index):03d}.MESH.obj", - color=shade_color(colors[color_key], jitter), - ) - elif index is None: - display_data[name] = DisplayData( - name=group, - display_type=DISPLAY_TYPE.FIBER, - color=colors[color_key], - ) + if url != "" and display_type == "OBJ": + full_url = f"{url}/{int(frame):06d}_{group}_{int(index):03d}.MESH.obj" else: - display_data[name] = DisplayData( - name=index, - display_type=DISPLAY_TYPE.SPHERE, - color=shade_color(colors[color_key], jitter), - ) + full_url = "" + + random.seed(index) + alpha = jitter * (random.random() - 0.5) / 2 # noqa: S311 + + display_data[name] = DisplayData( + name=index, + display_type=DISPLAY_TYPE[display_type], + color=shade_color(colors[color_key], alpha), + url=full_url, + ) return display_data def shade_color(color: str, alpha: float) -> str: + """ + Shade color by specified alpha. + + Positive values of alpha will blend the given color with white (alpha = 1.0 + returns pure white), while negative values of alpha will blend the given + color with black (alpha = -1.0 returns pure black). An alpha = 0.0 will + leave the color unchanged. + + Parameters + ---------- + color + Original color as hex string. + alpha + Shading value between -1 and +1. + + Returns + ------- + : + Shaded color as hex string. 
+ """ + old_color = color.replace("#", "") old_red, old_green, old_blue = [int(old_color[i : i + 2], 16) for i in (0, 2, 4)] layer_color = 0 if alpha < 0 else 255 diff --git a/src/arcade_collection/convert/convert_to_simularium_objects.py b/src/arcade_collection/convert/convert_to_simularium_objects.py index c99930b..61386cf 100644 --- a/src/arcade_collection/convert/convert_to_simularium_objects.py +++ b/src/arcade_collection/convert/convert_to_simularium_objects.py @@ -8,16 +8,52 @@ def convert_to_simularium_objects( series_key: str, simulation_type: str, categories: pd.DataFrame, - frame_spec: tuple[int, int, int], regions: list[str], + frame_spec: tuple[int, int, int], box: tuple[int, int, int], - ds: float, - dz: float, + ds: tuple[float, float, float], dt: float, colors: dict[str, str], group_size: int, url: str, + jitter: float = 1.0, ) -> str: + """ + Convert data to Simularium trajectory using mesh objects. + + Parameters + ---------- + series_key + Simulation series key. + simulation_type : {'potts'} + Simulation type. + categories + Simulation data containing ID, FRAME, and CATEGORY. + regions + List of regions. + frame_spec + Specification for simulation ticks. + box + Size of bounding box. + ds + Spatial scaling in um/voxel. + dt + Temporal scaling in hours/tick. + colors + Map of category to colors. + group_size + Number of objects in each mesh group. + url + URL for mesh object files. + jitter + Relative jitter applied to colors (set to 0 for exact colors). + + Returns + ------- + : + Simularium trajectory. 
+ """ + if simulation_type == "potts": frames = list(map(int, np.arange(*frame_spec))) length, width, height = box @@ -25,10 +61,11 @@ def convert_to_simularium_objects( categories, frames, group_size, regions, length, width, height ) else: - raise ValueError(f"invalid simulation type {simulation_type}") + message = f"invalid simulation type {simulation_type}" + raise ValueError(message) return convert_to_simularium( - series_key, simulation_type, data, length, width, height, ds, dz, dt, colors, url + series_key, simulation_type, data, length, width, height, ds, dt, colors, url, jitter ) @@ -41,6 +78,32 @@ def format_potts_for_objects( width: int, height: int, ) -> pd.DataFrame: + """ + Format ``potts`` simulation data for object-based Simularium trajectory. + + Parameters + ---------- + categories + Simulation data containing ID, FRAME, and CATEGORY. + frames + List of frames. + group_size + Number of objects in each mesh group. + regions + List of regions. + length + Length of bounding box. + width + Width of bounding box. + height + Height of bounding box. + + Returns + ------- + : + Data formatted for trajectory. 
+ """ + data: list[list[object]] = [] center = [length / 2, width / 2, height / 2] @@ -57,8 +120,10 @@ def format_potts_for_objects( for region in regions: name = f"{region}#{category}#{index}#{frame}" - data = data + [[name, int(frame), 1] + center + [[]]] + data = [*data, [name, int(frame), 1, *center, [], "OBJ"]] index_offset = index_offset + len(group_ids) - return pd.DataFrame(data, columns=["name", "frame", "radius", "x", "y", "z", "points"]) + return pd.DataFrame( + data, columns=["name", "frame", "radius", "x", "y", "z", "points", "display"] + ) diff --git a/src/arcade_collection/convert/convert_to_simularium_shapes.py b/src/arcade_collection/convert/convert_to_simularium_shapes.py index 9e101c1..243d5c7 100644 --- a/src/arcade_collection/convert/convert_to_simularium_shapes.py +++ b/src/arcade_collection/convert/convert_to_simularium_shapes.py @@ -1,7 +1,8 @@ +from __future__ import annotations + import random -import tarfile from math import cos, isnan, pi, sin, sqrt -from typing import Optional, Union +from typing import TYPE_CHECKING import numpy as np import pandas as pd @@ -10,6 +11,9 @@ from arcade_collection.output.extract_tick_json import extract_tick_json from arcade_collection.output.get_location_voxels import get_location_voxels +if TYPE_CHECKING: + import tarfile + CELL_STATES: list[str] = [ "UNDEFINED", "APOPTOTIC", @@ -19,6 +23,7 @@ "SENESCENT", "NECROTIC", ] +"""Indexed cell states.""" EDGE_TYPES: list[str] = [ "ARTERIOLE", @@ -28,6 +33,7 @@ "VENULE", "UNDEFINED", ] +"""Indexed graph edge types.""" def convert_to_simularium_shapes( @@ -36,73 +42,127 @@ def convert_to_simularium_shapes( data_tars: dict[str, tarfile.TarFile], frame_spec: tuple[int, int, int], box: tuple[int, int, int], - ds: float, - dz: float, + ds: tuple[float, float, float], dt: float, colors: dict[str, str], resolution: int = 0, + jitter: float = 1.0, ) -> str: + """ + Convert data to Simularium trajectory using shapes. 
+ + Parameters + ---------- + series_key + Simulation series key. + simulation_type : {'patch', 'potts'} + Simulation type. + data_tars + Map of simulation data archives. + frame_spec + Specification for simulation ticks. + box + Size of bounding box. + ds + Spatial scaling in um/voxel. + dt + Temporal scaling in hours/tick. + colors + Map of category to colors. + resolution + Number of voxels represented by a sphere (0 for single sphere per cell). + jitter + Relative jitter applied to colors (set to 0 for exact colors). + + Returns + ------- + : + Simularium trajectory. + """ + + # Throw exception if invalid simulation type. + if simulation_type not in ("patch", "potts"): + message = f"invalid simulation type {simulation_type}" + raise ValueError(message) if simulation_type == "patch": + # Simulation type must have either or both "cells" and "graph" data + if not ("cells" in data_tars or "graph" in data_tars): + return "" + frames = list(map(float, np.arange(*frame_spec))) radius, margin, height = box - bounds = radius + margin - length = (2 / sqrt(3)) * (3 * (radius + margin) - 1) - width = 4 * (radius + margin) - 2 + bounds, length, width = calculate_patch_size(radius, margin) data = format_patch_for_shapes( - series_key, data_tars["cells"], data_tars["graph"], frames, bounds + series_key, data_tars.get("cells"), data_tars.get("graph"), frames, bounds ) elif simulation_type == "potts": + # Simulation type must have both "cells" and "locations" data + if not ("cells" in data_tars and "locations" in data_tars): + return "" + frames = list(map(int, np.arange(*frame_spec))) length, width, height = box data = format_potts_for_shapes( series_key, data_tars["cells"], data_tars["locations"], frames, resolution ) - else: - raise ValueError(f"invalid simulation type {simulation_type}") return convert_to_simularium( - series_key, simulation_type, data, length, width, height, ds, dz, dt, colors + series_key, simulation_type, data, length, width, height, ds, dt, colors, 
jitter=jitter ) def format_patch_for_shapes( series_key: str, - cells_tar: tarfile.TarFile, - graph_tar: Optional[tarfile.TarFile], - frames: list[Union[int, float]], + cells_tar: tarfile.TarFile | None, + graph_tar: tarfile.TarFile | None, + frames: list[float], bounds: int, ) -> pd.DataFrame: - data: list[list[Union[int, str, float]]] = [] - - theta = [pi * (60 * i) / 180.0 for i in range(6)] - dx = [cos(t) / sqrt(3) for t in theta] - dy = [sin(t) / sqrt(3) for t in theta] + """ + Format ``patch`` simulation data for shape-based Simularium trajectory. + + Parameters + ---------- + series_key + Simulation series key. + cells_tar + Archive of cell agent data. + graph_tar + Archive of vascular graph data. + frames + List of frames. + bounds + Simulation bounds size (radius + margin). + + Returns + ------- + : + Data formatted for trajectory. + """ + + data: list[list[int | str | float | list]] = [] for frame in frames: - cell_timepoint = extract_tick_json(cells_tar, series_key, frame, field="cells") + if cells_tar is not None: + cell_timepoint = extract_tick_json(cells_tar, series_key, frame, field="cells") - for location, cells in cell_timepoint: - u, v, w, z = location - rotation = random.randint(0, 5) + for location, cells in cell_timepoint: + u, v, w, z = location + rotation = random.randint(0, 5) # noqa: S311 - for cell in cells: - _, population, state, position, volume, _ = cell - cell_id = f"{u}{v}{w}{z}{position}" + for cell in cells: + _, population, state, position, volume, _ = cell + cell_id = f"{u}{v}{w}{z}{position}" - name = f"POPULATION{population}#{cell_id}#{CELL_STATES[state]}" - radius = (volume ** (1.0 / 3)) / 1.5 + name = f"POPULATION{population}#{CELL_STATES[state]}#{cell_id}" + radius = float("%.2g" % ((volume ** (1.0 / 3)) / 1.5)) # round to 2 sig figs - x = (3 * (u + bounds) - 1) / sqrt(3) - y = (v - w) + 2 * bounds - 1 - - center = [ - (x + dx[(position + rotation) % 6]), - (y + dy[(position + rotation) % 6]), - z, - ] + offset = (position + 
rotation) % 6 + x, y = convert_hexagonal_to_rectangular_coordinates((u, v, w), bounds, offset) + center = [x, y, z] - data = data + [[name, frame, radius] + center + [[]]] + data = [*data, [name, frame, radius, *center, [], "SPHERE"]] if graph_tar is not None: graph_timepoint = extract_tick_json( @@ -112,7 +172,7 @@ def format_patch_for_shapes( for from_node, to_node, edge in graph_timepoint: edge_type, radius, _, _, _, _, flow = edge - name = f"VASCULATURE##{'UNDEFINED' if isnan(flow) else EDGE_TYPES[edge_type + 2]}" + name = f"VASCULATURE#{'UNDEFINED' if isnan(flow) else EDGE_TYPES[edge_type + 2]}" subpoints = [ from_node[0] / sqrt(3), @@ -123,18 +183,112 @@ def format_patch_for_shapes( to_node[2], ] - data = data + [[name, frame, radius] + [0, 0, 0] + [subpoints]] + data = [*data, [name, frame, radius, 0, 0, 0, subpoints, "FIBER"]] - return pd.DataFrame(data, columns=["name", "frame", "radius", "x", "y", "z", "points"]) + return pd.DataFrame( + data, columns=["name", "frame", "radius", "x", "y", "z", "points", "display"] + ) + + +def convert_hexagonal_to_rectangular_coordinates( + uvw: tuple[int, int, int], bounds: int, offset: int +) -> tuple[float, float]: + """ + Convert hexagonal (u, v, w) coordinates to rectangular (x, y) coordinates. + + Conversion is based on the bounds of the simulation, + + Parameters + ---------- + uvw + Hexagonal (u, v, w) coordinates. + bounds + Simulation bounds size (radius + margin). + offset + Index of hexagonal offset. + + Returns + ------- + : + Rectangular (x, y) coordinates. + """ + + u, v, w = uvw + theta = [pi * (60 * i) / 180.0 for i in range(6)] + dx = [cos(t) / sqrt(3) for t in theta] + dy = [sin(t) / sqrt(3) for t in theta] + + x = (3 * (u + bounds) - 1) / sqrt(3) + y = (v - w) + 2 * bounds - 1 + + return x + dx[offset], y + dy[offset] + + +def calculate_patch_size(radius: int, margin: int) -> tuple[int, float, float]: + """ + Calculate hexagonal patch simulation sizes. 
+ + Parameters + ---------- + radius + Number of hexagonal patches from the center patch. + margin + Number of hexagonal patches in the margin. + + Returns + ------- + : + Bounds, length, and width of the simulation bounding box. + """ + + bounds = radius + margin + length = (2 / sqrt(3)) * (3 * bounds - 1) + width = 4 * bounds - 2 + + return bounds, length, width def format_potts_for_shapes( series_key: str, cells_tar: tarfile.TarFile, locations_tar: tarfile.TarFile, - frames: list[Union[int, float]], + frames: list[float], resolution: int, ) -> pd.DataFrame: + """ + Format `potts` simulation data for shape-based Simularium trajectory. + + The resolution parameter can be used to tune how many spheres are used to + represent each cell. Resolution = 0 displays each cell as a single sphere + centered on the average voxel position. Resolution = 1 displays each + individual voxel of each cell as a single sphere. + + Resolution = N will aggregate voxels by dividing the voxels into NxNxN + cubes, and replacing cubes with at least 50% of those voxels occupied with a + single sphere centered at the center of the cube. + + For resolution > 0, interior voxels (fully surrounded voxels) are not + removed. + + Parameters + ---------- + series_key + Simulation series key. + cells_tar + Archive of cell data. + locations_tar + Archive of location data. + frames + List of frames. + resolution + Number of voxels represented by a sphere (0 for single sphere per cell). + + Returns + ------- + : + Data formatted for trajectory. 
+ """ + data: list[list[object]] = [] for frame in frames: @@ -145,14 +299,13 @@ def format_potts_for_shapes( regions = [loc["region"] for loc in location["location"]] for region in regions: - name = f"{region}#{cell['id']}#{cell['phase']}" - + name = f"{region}#{cell['phase']}#{cell['id']}" all_voxels = get_location_voxels(location, region if region != "DEFAULT" else None) if resolution == 0: - radius = (len(all_voxels) ** (1.0 / 3)) / 1.5 + radius = approximate_radius_from_voxels(len(all_voxels)) center = list(np.array(all_voxels).mean(axis=0)) - data = data + [[name, int(frame), radius] + center + [[]]] + data = [*data, [name, int(frame), radius, *center, [], "SPHERE"]] else: radius = resolution / 2 center_offset = (resolution - 1) / 2 @@ -165,25 +318,61 @@ def format_potts_for_shapes( ] data = data + [ - [name, int(frame), radius] + voxel + [[]] for voxel in center_voxels + [name, int(frame), radius, *voxel, [], "SPHERE"] for voxel in center_voxels ] - return pd.DataFrame(data, columns=["name", "frame", "radius", "x", "y", "z", "points"]) + return pd.DataFrame( + data, columns=["name", "frame", "radius", "x", "y", "z", "points", "display"] + ) + + +def approximate_radius_from_voxels(voxels: int) -> float: + """ + Approximate display sphere radius from number of voxels. + + Parameters + ---------- + voxels + Number of voxels. + + Returns + ------- + : + Approximate radius. + """ + + return (voxels ** (1.0 / 3)) / 1.5 def get_resolution_voxels( voxels: list[tuple[int, int, int]], resolution: int ) -> list[tuple[int, int, int]]: - df = pd.DataFrame(voxels, columns=["x", "y", "z"]) + """ + Get voxels at specified resolution. + + Parameters + ---------- + voxels + List of voxels. + resolution + Resolution of voxels. + + Returns + ------- + : + List of voxels at specified resolution. 
+ """ - min_x, min_y, min_z = df.min() - max_x, max_y, max_z = df.max() + voxel_df = pd.DataFrame(voxels, columns=["x", "y", "z"]) + + min_x, min_y, min_z = voxel_df.min() + max_x, max_y, max_z = voxel_df.max() samples = [ (sx, sy, sz) - for sx in np.arange(min_x, max_x, resolution) - for sy in np.arange(min_y, max_y, resolution) - for sz in np.arange(min_z, max_z, resolution) + for sx in np.arange(min_x, max_x + 1, resolution) + for sy in np.arange(min_y, max_y + 1, resolution) + for sz in np.arange(min_z, max_z + 1, resolution) ] offsets = [ @@ -207,6 +396,22 @@ def get_resolution_voxels( def filter_border_voxels( voxels: set[tuple[int, int, int]], resolution: int ) -> list[tuple[int, int, int]]: + """ + Filter voxels to only include the border voxels. + + Parameters + ---------- + voxels + List of voxels. + resolution + Resolution of voxels. + + Returns + ------- + : + List of filtered voxels. + """ + offsets = [ (resolution, 0, 0), (-resolution, 0, 0), @@ -222,4 +427,4 @@ def filter_border_voxels( if len(set(neighbors) - set(voxels)) != 0: filtered_voxels.append((x, y, z)) - return filtered_voxels + return sorted(filtered_voxels) diff --git a/src/arcade_collection/convert/convert_to_tfe.py b/src/arcade_collection/convert/convert_to_tfe.py new file mode 100644 index 0000000..064b621 --- /dev/null +++ b/src/arcade_collection/convert/convert_to_tfe.py @@ -0,0 +1,145 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + import pandas as pd + + +def convert_to_tfe( + all_data: pd.DataFrame, features: list[tuple[str, str, str]], frame_spec: tuple[int, int, int] +) -> dict: + """ + Generate TFE manifest and feature data for simulation. + + Parameters + ---------- + all_data + Simulation data containing ID, TICK, and time. + features + List of feature keys, names, and data types. + frame_spec + Specification for frames. 
+ + Returns + ------- + : + TFE manifest and feature data + """ + + frames = list(np.arange(*frame_spec)) + manifest = get_manifest_data(features, frames) + + frame_data = all_data[all_data["TICK"].isin(frames)] + + tracks = get_tracks_from_data(frame_data) + times = get_times_from_data(frame_data) + + tfe_json = {"manifest": manifest, "tracks": tracks, "times": times, "features": {}} + + for index, (key, _, dtype) in enumerate(features): + if dtype == "categorical": + categories = list(all_data[key].unique()) + manifest["features"][index]["categories"] = categories + else: + categories = None + + tfe_json["features"][key] = get_feature_from_data(frame_data, key, categories) + + return tfe_json + + +def get_manifest_data(features: list[tuple[str, str, str]], frames: list[int]) -> dict: + """ + Build manifest for TFE. + + Parameters + ---------- + features + List of feature keys, names, and data types. + frames + List of frames. + + Returns + ------- + : + Manifest in TFE format. + """ + + return { + "frames": [f"frames/frame_{i}.png" for i in range(len(frames))], + "features": [ + {"key": key, "name": name, "data": f"features/{key}.json", "type": dtype} + for key, name, dtype in features + ], + "tracks": "tracks.json", + "times": "times.json", + } + + +def get_tracks_from_data(data: pd.DataFrame) -> dict: + """ + Extract track ids from data and format for TFE. + + Parameters + ---------- + data + Simulation data for selected frames. + + Returns + ------- + : + Track data in TFE format. + """ + + return {"data": [0, *list(data["ID"])]} + + +def get_times_from_data(data: pd.DataFrame) -> dict: + """ + Extract time points from data and format for TFE. + + Parameters + ---------- + data + Simulation data for selected frames. + + Returns + ------- + : + Time data in TFE format. 
+ """ + + return {"data": [0, *list(data["time"])]} + + +def get_feature_from_data(data: pd.DataFrame, feature: str, categories: list | None = None) -> dict: + """ + Extract specified feature from data and format for TFE. + + Parameters + ---------- + data + Simulation data for selected frames. + feature + Feature key. + categories + List of data categories (if data is categorical). + + Returns + ------- + : + Feature data in TFE format. + """ + + if categories is not None: + feature_values = data[feature].apply(categories.index) + else: + feature_values = data[feature] + + feature_min = float(np.nanmin(feature_values)) + feature_max = float(np.nanmax(feature_values)) + + return {"data": [0, *list(feature_values)], "min": feature_min, "max": feature_max} diff --git a/tests/arcade_collection/convert/__init__.py b/tests/arcade_collection/convert/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/arcade_collection/convert/test_convert_to_contours.py b/tests/arcade_collection/convert/test_convert_to_contours.py new file mode 100644 index 0000000..2fd8ab7 --- /dev/null +++ b/tests/arcade_collection/convert/test_convert_to_contours.py @@ -0,0 +1,336 @@ +import unittest + +from arcade_collection.convert.convert_to_contours import convert_to_contours + +from .utilities import build_tar_instance + + +class TestConvertToContours(unittest.TestCase): + def setUp(self): + self.series_key = "SERIES_KEY" + self.frame = 5 + self.file = f"{self.series_key}_{self.frame:06d}.LOCATIONS.json" + + def test_convert_to_contours_no_voxels(self): + regions = ["DEFAULT"] + box = (1, 1, 1) + indices = {"top": [1]} + contents = { + self.file: [ + { + "id": 1, + "location": [ + {"region": "DEFAULT", "voxels": []}, + ], + }, + ] + } + + locations_tar = build_tar_instance(contents) + + expected_contours = {"DEFAULT": {"top": {}}} + + contours = convert_to_contours( + self.series_key, locations_tar, self.frame, regions, box, indices + ) + + 
self.assertDictEqual(expected_contours, contours) + + def test_convert_to_contours_no_index(self): + regions = ["DEFAULT"] + box = (3, 3, 3) + indices = {"top": [1]} + contents = { + self.file: [ + { + "id": 1, + "location": [ + {"region": "DEFAULT", "voxels": [[0, 0, 2]]}, + ], + }, + ] + } + + locations_tar = build_tar_instance(contents) + + expected_contours = {"DEFAULT": {"top": {}}} + + contours = convert_to_contours( + self.series_key, locations_tar, self.frame, regions, box, indices + ) + + self.assertDictEqual(expected_contours, contours) + + def test_convert_to_contours_different_views(self): + regions = ["DEFAULT"] + box = (4, 5, 6) + indices = {"top": [1], "side1": [1], "side2": [1]} + contents = { + self.file: [ + { + "id": 1, + "location": [ + { + "region": "DEFAULT", + "voxels": [ + [1, 1, 1], + [1, 2, 1], + [1, 3, 1], + [2, 3, 1], + [2, 1, 1], + [2, 1, 2], + [2, 1, 3], + [1, 1, 2], + ], + }, + ], + } + ] + } + + expected_contours = { + "DEFAULT": { + "top": { + 1: [ + [ + [2.5, 3.0], + [2.0, 2.5], + [1.5, 2.0], + [2.0, 1.5], + [2.5, 1.0], + [2.0, 0.5], + [1.0, 0.5], + [0.5, 1.0], + [0.5, 2.0], + [0.5, 3.0], + [1.0, 3.5], + [2.0, 3.5], + [2.5, 3.0], + ] + ] + }, + "side1": { + 1: [ + [ + [2.5, 3.0], + [2.5, 2.0], + [2.5, 1.0], + [2.0, 0.5], + [1.0, 0.5], + [0.5, 1.0], + [0.5, 2.0], + [1.0, 2.5], + [1.5, 3.0], + [2.0, 3.5], + [2.5, 3.0], + ] + ] + }, + "side2": { + 1: [ + [ + [2.5, 1.0], + [2.0, 0.5], + [1.0, 0.5], + [0.5, 1.0], + [0.5, 2.0], + [0.5, 3.0], + [1.0, 3.5], + [1.5, 3.0], + [1.5, 2.0], + [2.0, 1.5], + [2.5, 1.0], + ] + ] + }, + } + } + + locations_tar = build_tar_instance(contents) + + contours = convert_to_contours( + self.series_key, locations_tar, self.frame, regions, box, indices + ) + + self.assertDictEqual(expected_contours, contours) + + def test_convert_to_contours_multiple_regions(self): + regions = ["DEFAULT", "REGION_A", "REGION_B"] + box = (4, 5, 6) + indices = {"top": [1]} + contents = { + self.file: [ + { + "id": 1, + 
"location": [ + {"region": "DEFAULT", "voxels": [[1, 1, 1], [1, 2, 1], [2, 1, 1]]}, + {"region": "REGION_A", "voxels": [[2, 2, 1]]}, + {"region": "REGION_B", "voxels": [[2, 3, 1]]}, + ], + } + ] + } + + expected_contours = { + "DEFAULT": { + "top": { + 1: [ + [ + [2.5, 3.0], + [2.5, 2.0], + [2.5, 1.0], + [2.0, 0.5], + [1.0, 0.5], + [0.5, 1.0], + [0.5, 2.0], + [1.0, 2.5], + [1.5, 3.0], + [2.0, 3.5], + [2.5, 3.0], + ] + ] + } + }, + "REGION_A": { + "top": {1: [[[2.5, 2.0], [2.0, 1.5], [1.5, 2.0], [2.0, 2.5], [2.5, 2.0]]]} + }, + "REGION_B": { + "top": {1: [[[2.5, 3.0], [2.0, 2.5], [1.5, 3.0], [2.0, 3.5], [2.5, 3.0]]]} + }, + } + + locations_tar = build_tar_instance(contents) + + contours = convert_to_contours( + self.series_key, locations_tar, self.frame, regions, box, indices + ) + + self.assertDictEqual(expected_contours, contours) + + def test_convert_to_contours_disconnected_location(self): + regions = ["DEFAULT"] + box = (5, 6, 6) + indices = {"top": [1]} + contents = { + self.file: [ + { + "id": 1, + "location": [ + { + "region": "DEFAULT", + "voxels": [ + [1, 1, 1], + [1, 2, 1], + [2, 1, 1], + [3, 3, 1], + [3, 4, 1], + [2, 4, 1], + ], + }, + ], + }, + ] + } + + expected_contours = { + "DEFAULT": { + "top": { + 1: [ + [ + [2.5, 1.0], + [2.0, 0.5], + [1.0, 0.5], + [0.5, 1.0], + [0.5, 2.0], + [1.0, 2.5], + [1.5, 2.0], + [2.0, 1.5], + [2.5, 1.0], + ], + [ + [3.5, 4.0], + [3.5, 3.0], + [3.0, 2.5], + [2.5, 3.0], + [2.0, 3.5], + [1.5, 4.0], + [2.0, 4.5], + [3.0, 4.5], + [3.5, 4.0], + ], + ] + } + } + } + + locations_tar = build_tar_instance(contents) + + contours = convert_to_contours( + self.series_key, locations_tar, self.frame, regions, box, indices + ) + + self.assertDictEqual(expected_contours, contours) + + def test_convert_to_contours_multiple_locations(self): + regions = ["DEFAULT"] + box = (4, 5, 6) + indices = {"top": [1]} + contents = { + self.file: [ + { + "id": 1, + "location": [ + {"region": "DEFAULT", "voxels": [[1, 1, 1], [1, 2, 1], [2, 1, 1]]}, + ], 
+ }, + { + "id": 2, + "location": [ + {"region": "DEFAULT", "voxels": [[2, 2, 1], [2, 3, 1], [1, 3, 1]]}, + ], + }, + ] + } + + expected_contours = { + "DEFAULT": { + "top": { + 1: [ + [ + [2.5, 1.0], + [2.0, 0.5], + [1.0, 0.5], + [0.5, 1.0], + [0.5, 2.0], + [1.0, 2.5], + [1.5, 2.0], + [2.0, 1.5], + [2.5, 1.0], + ], + [ + [2.5, 3.0], + [2.5, 2.0], + [2.0, 1.5], + [1.5, 2.0], + [1.0, 2.5], + [0.5, 3.0], + [1.0, 3.5], + [2.0, 3.5], + [2.5, 3.0], + ], + ] + } + } + } + + locations_tar = build_tar_instance(contents) + + contours = convert_to_contours( + self.series_key, locations_tar, self.frame, regions, box, indices + ) + + self.assertDictEqual(expected_contours, contours) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/arcade_collection/convert/test_convert_to_images.py b/tests/arcade_collection/convert/test_convert_to_images.py new file mode 100644 index 0000000..9bf8521 --- /dev/null +++ b/tests/arcade_collection/convert/test_convert_to_images.py @@ -0,0 +1,1034 @@ +import unittest + +import numpy as np + +from arcade_collection.convert.convert_to_images import ImageType, convert_to_images + +from .utilities import build_tar_instance + + +class TestConvertToImages(unittest.TestCase): + def setUp(self): + self.series_key = "SERIES_KEY" + self.frame_spec = (5, 16, 5) + self.regions = ["DEFAULT", "REGION"] + self.box = (8, 8, 3) + + contents = { + f"{self.series_key}_000005.LOCATIONS.json": [ + { + "id": 1, + "location": [ + {"region": "DEFAULT", "voxels": [[1, 1, 1], [2, 1, 1], [2, 2, 1]]}, + {"region": "REGION", "voxels": [[1, 2, 1]]}, + ], + }, + { + "id": 2, + "location": [ + {"region": "DEFAULT", "voxels": [[5, 3, 1], [5, 4, 1]]}, + {"region": "REGION", "voxels": [[6, 3, 1], [6, 4, 1]]}, + ], + }, + ], + f"{self.series_key}_000010.LOCATIONS.json": [ + { + "id": 1, + "location": [ + {"region": "DEFAULT", "voxels": [[3, 1, 1]]}, + {"region": "REGION", "voxels": [[3, 2, 1], [4, 1, 1], [4, 2, 1]]}, + ], + }, + { + "id": 2, + "location": [ + { + 
"region": "DEFAULT", + "voxels": [[1, 3, 1], [2, 3, 1], [1, 4, 1], [2, 4, 1]], + }, + {"region": "REGION", "voxels": []}, + ], + }, + { + "id": 3, + "location": [ + {"region": "DEFAULT", "voxels": [[3, 5, 1], [4, 5, 1]]}, + {"region": "REGION", "voxels": [[3, 6, 1], [4, 6, 1]]}, + ], + }, + ], + f"{self.series_key}_000015.LOCATIONS.json": [ + { + "id": 1, + "location": [ + {"region": "DEFAULT", "voxels": [[2, 4, 1], [3, 3, 1], [3, 4, 1]]}, + {"region": "REGION", "voxels": [[2, 3, 1], [4, 3, 1], [4, 4, 1]]}, + ], + }, + ], + } + + self.locations_tar = build_tar_instance(contents) + + def test_convert_to_images_full_without_chunks(self): + chunk_size = 6 + image_type = ImageType.FULL + + chunk_00 = np.array( + [ + [ + [ + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 2, 2], + [0, 0, 0, 0, 2, 2], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 2], + [0, 0, 0, 0, 0, 2], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + ], + [ + [ + [ + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [2, 2, 0, 0, 0, 0], + [2, 2, 0, 0, 0, 0], + [0, 0, 3, 3, 0, 0], + [0, 0, 3, 3, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 3, 3, 0, 0], + ] + ], + ], + [ + [ + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + ], + ] + ) + + expected_images = [(0, 0, chunk_00, None)] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], 
chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertIsNone(chunk[3]) + + def test_convert_to_images_full_by_frame_without_chunks(self): + chunk_size = 6 + image_type = ImageType.FULL_BY_FRAME + + chunk_00_05 = np.array( + [ + [ + [ + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 2, 2], + [0, 0, 0, 0, 2, 2], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 2], + [0, 0, 0, 0, 0, 2], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + ] + ] + ) + chunk_00_10 = np.array( + [ + [ + [ + [ + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [2, 2, 0, 0, 0, 0], + [2, 2, 0, 0, 0, 0], + [0, 0, 3, 3, 0, 0], + [0, 0, 3, 3, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 3, 3, 0, 0], + ] + ], + ] + ] + ) + chunk_00_15 = np.array( + [ + [ + [ + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + ] + ] + ) + + expected_images = [(0, 0, chunk_00_05, 5), (0, 0, chunk_00_10, 10), (0, 0, chunk_00_15, 15)] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_flat_by_frame_without_chunks(self): + chunk_size = 6 + image_type = ImageType.FLAT_BY_FRAME + + chunk_00_05 = np.array( + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 2, 2], 
+ [0, 0, 0, 0, 2, 2], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ) + chunk_00_10 = np.array( + [ + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [2, 2, 0, 0, 0, 0], + [2, 2, 0, 0, 0, 0], + [0, 0, 3, 3, 0, 0], + [0, 0, 3, 3, 0, 0], + ] + ) + chunk_00_15 = np.array( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ) + + expected_images = [(0, 0, chunk_00_05, 5), (0, 0, chunk_00_10, 10), (0, 0, chunk_00_15, 15)] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_flat_rgba_by_frame_without_chunks(self): + chunk_size = 6 + image_type = ImageType.FLAT_RGBA_BY_FRAME + + chunk_00_05 = np.zeros((6, 6, 4), dtype=np.uint8) + chunk_00_10 = np.zeros((6, 6, 4), dtype=np.uint8) + chunk_00_15 = np.zeros((6, 6, 4), dtype=np.uint8) + + chunk_00_05[:, :, 3] = 255 + chunk_00_10[:, :, 3] = 255 + chunk_00_15[:, :, 3] = 255 + + chunk_00_05[:, :, 0] = [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 2, 2], + [0, 0, 0, 0, 2, 2], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + chunk_00_10[:, :, 0] = [ + [0, 0, 3, 3, 0, 0], + [0, 0, 3, 3, 0, 0], + [4, 4, 0, 0, 0, 0], + [4, 4, 0, 0, 0, 0], + [0, 0, 5, 5, 0, 0], + [0, 0, 5, 5, 0, 0], + ] + chunk_00_15[:, :, 0] = [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 6, 6, 6, 0, 0], + [0, 6, 6, 6, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + + expected_images = [(0, 0, chunk_00_05, 5), (0, 0, chunk_00_10, 10), (0, 0, chunk_00_15, 15)] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + 
self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_full_binary_without_chunks(self): + chunk_size = 6 + image_type = ImageType.FULL_BINARY + + chunk_00 = np.array( + [ + [ + [ + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + ], + [ + [ + [ + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + ] + ], + ], + [ + [ + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + ], + ] + ) + + expected_images = [(0, 0, chunk_00, None)] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertIsNone(chunk[3]) + + def test_convert_to_images_full_binary_by_frame_without_chunks(self): + chunk_size = 6 + image_type = ImageType.FULL_BINARY_BY_FRAME + + 
chunk_00_05 = np.array( + [ + [ + [ + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + ] + ] + ) + chunk_00_10 = np.array( + [ + [ + [ + [ + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + ] + ], + ] + ] + ) + chunk_00_15 = np.array( + [ + [ + [ + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + [ + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 0, 1, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ], + ] + ] + ) + + expected_images = [(0, 0, chunk_00_05, 5), (0, 0, chunk_00_10, 10), (0, 0, chunk_00_15, 15)] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_flat_binary_by_frame_without_chunks(self): + chunk_size = 6 + image_type = ImageType.FLAT_BINARY_BY_FRAME + + chunk_00_05 = np.array( + [ + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 1, 1], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ) + chunk_00_10 = np.array( + [ + [0, 0, 1, 1, 0, 0], + [0, 0, 1, 1, 0, 0], + [1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 1, 1, 0, 0], + 
[0, 0, 1, 1, 0, 0], + ] + ) + chunk_00_15 = np.array( + [ + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + ] + ) + + expected_images = [(0, 0, chunk_00_05, 5), (0, 0, chunk_00_10, 10), (0, 0, chunk_00_15, 15)] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_full_with_chunks(self): + chunk_size = 3 + image_type = ImageType.FULL + + chunk_00 = np.array( + [ + [[[[1, 1, 0], [1, 1, 0], [0, 0, 0]]], [[[0, 0, 0], [1, 0, 0], [0, 0, 0]]]], + [[[[0, 0, 1], [0, 0, 1], [2, 2, 0]]], [[[0, 0, 0], [0, 0, 1], [0, 0, 0]]]], + [[[[0, 0, 0], [0, 0, 0], [0, 1, 1]]], [[[0, 0, 0], [0, 0, 0], [0, 1, 0]]]], + ] + ) + chunk_10 = np.array( + [ + [[[[0, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]], + [[[[2, 2, 0], [0, 0, 3], [0, 0, 3]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 3]]]], + [[[[0, 1, 1], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]], + ] + ) + chunk_01 = np.array( + [ + [[[[0, 0, 0], [0, 0, 0], [0, 2, 2]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 2]]]], + [[[[1, 0, 0], [1, 0, 0], [0, 0, 0]]], [[[1, 0, 0], [1, 0, 0], [0, 0, 0]]]], + [[[[0, 0, 0], [0, 0, 0], [1, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [1, 0, 0]]]], + ] + ) + chunk_11 = np.array( + [ + [[[[0, 2, 2], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 2], [0, 0, 0], [0, 0, 0]]]], + [[[[0, 0, 0], [3, 0, 0], [3, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [3, 0, 0]]]], + [[[[1, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[1, 0, 0], [0, 0, 0], [0, 0, 0]]]], + ] + ) + + expected_images = [ + (0, 0, chunk_00, None), + (0, 1, chunk_01, None), + (1, 0, chunk_10, None), 
+ (1, 1, chunk_11, None), + ] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertIsNone(chunk[3]) + + def test_convert_to_images_full_by_frame_with_chunks(self): + chunk_size = 3 + image_type = ImageType.FULL_BY_FRAME + + chunk_00_05 = np.array( + [[[[[1, 1, 0], [1, 1, 0], [0, 0, 0]]], [[[0, 0, 0], [1, 0, 0], [0, 0, 0]]]]] + ) + chunk_00_10 = np.array( + [[[[[0, 0, 1], [0, 0, 1], [2, 2, 0]]], [[[0, 0, 0], [0, 0, 1], [0, 0, 0]]]]] + ) + chunk_00_15 = np.array( + [[[[[0, 0, 0], [0, 0, 0], [0, 1, 1]]], [[[0, 0, 0], [0, 0, 0], [0, 1, 0]]]]] + ) + chunk_10_10 = np.array( + [[[[[2, 2, 0], [0, 0, 3], [0, 0, 3]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 3]]]]] + ) + chunk_10_15 = np.array( + [[[[[0, 1, 1], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]]] + ) + chunk_01_05 = np.array( + [[[[[0, 0, 0], [0, 0, 0], [0, 2, 2]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 2]]]]] + ) + chunk_01_10 = np.array( + [[[[[1, 0, 0], [1, 0, 0], [0, 0, 0]]], [[[1, 0, 0], [1, 0, 0], [0, 0, 0]]]]] + ) + chunk_01_15 = np.array( + [[[[[0, 0, 0], [0, 0, 0], [1, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [1, 0, 0]]]]] + ) + chunk_11_05 = np.array( + [[[[[0, 2, 2], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 2], [0, 0, 0], [0, 0, 0]]]]] + ) + chunk_11_10 = np.array( + [[[[[0, 0, 0], [3, 0, 0], [3, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [3, 0, 0]]]]] + ) + chunk_11_15 = np.array( + [[[[[1, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[1, 0, 0], [0, 0, 0], [0, 0, 0]]]]] + ) + + expected_images = [ + (0, 0, chunk_00_05, 5), + (0, 1, chunk_01_05, 5), + (1, 1, chunk_11_05, 5), + (0, 0, chunk_00_10, 10), + (0, 1, chunk_01_10, 10), + (1, 0, chunk_10_10, 10), + (1, 1, chunk_11_10, 10), + (0, 0, chunk_00_15, 15), + (0, 
1, chunk_01_15, 15), + (1, 0, chunk_10_15, 15), + (1, 1, chunk_11_15, 15), + ] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_flat_by_frame_with_chunks(self): + chunk_size = 3 + image_type = ImageType.FLAT_BY_FRAME + + chunk_00_05 = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]]) + chunk_00_10 = np.array([[0, 0, 1], [0, 0, 1], [2, 2, 0]]) + chunk_00_15 = np.array([[0, 0, 0], [0, 0, 0], [0, 1, 1]]) + chunk_10_10 = np.array([[2, 2, 0], [0, 0, 3], [0, 0, 3]]) + chunk_10_15 = np.array([[0, 1, 1], [0, 0, 0], [0, 0, 0]]) + chunk_01_05 = np.array([[0, 0, 0], [0, 0, 0], [0, 2, 2]]) + chunk_01_10 = np.array([[1, 0, 0], [1, 0, 0], [0, 0, 0]]) + chunk_01_15 = np.array([[0, 0, 0], [0, 0, 0], [1, 0, 0]]) + chunk_11_05 = np.array([[0, 2, 2], [0, 0, 0], [0, 0, 0]]) + chunk_11_10 = np.array([[0, 0, 0], [3, 0, 0], [3, 0, 0]]) + chunk_11_15 = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + expected_images = [ + (0, 0, chunk_00_05, 5), + (0, 1, chunk_01_05, 5), + (1, 1, chunk_11_05, 5), + (0, 0, chunk_00_10, 10), + (0, 1, chunk_01_10, 10), + (1, 0, chunk_10_10, 10), + (1, 1, chunk_11_10, 10), + (0, 0, chunk_00_15, 15), + (0, 1, chunk_01_15, 15), + (1, 0, chunk_10_15, 15), + (1, 1, chunk_11_15, 15), + ] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + 
self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_flat_rgba_by_frame_with_chunks(self): + chunk_size = 3 + image_type = ImageType.FLAT_RGBA_BY_FRAME + + chunk_00_05 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_00_10 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_00_15 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_01_05 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_01_10 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_01_15 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_10_10 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_10_15 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_11_05 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_11_10 = np.zeros((3, 3, 4), dtype=np.uint8) + chunk_11_15 = np.zeros((3, 3, 4), dtype=np.uint8) + + chunk_00_05[:, :, 3] = 255 + chunk_00_10[:, :, 3] = 255 + chunk_00_15[:, :, 3] = 255 + chunk_01_05[:, :, 3] = 255 + chunk_01_10[:, :, 3] = 255 + chunk_01_15[:, :, 3] = 255 + chunk_10_10[:, :, 3] = 255 + chunk_10_15[:, :, 3] = 255 + chunk_11_05[:, :, 3] = 255 + chunk_11_10[:, :, 3] = 255 + chunk_11_15[:, :, 3] = 255 + + chunk_00_05[:, :, 0] = [[1, 1, 0], [1, 1, 0], [0, 0, 0]] + chunk_00_10[:, :, 0] = [[0, 0, 3], [0, 0, 3], [4, 4, 0]] + chunk_00_15[:, :, 0] = [[0, 0, 0], [0, 0, 0], [0, 6, 6]] + chunk_10_10[:, :, 0] = [[4, 4, 0], [0, 0, 5], [0, 0, 5]] + chunk_10_15[:, :, 0] = [[0, 6, 6], [0, 0, 0], [0, 0, 0]] + chunk_01_05[:, :, 0] = [[0, 0, 0], [0, 0, 0], [0, 2, 2]] + chunk_01_10[:, :, 0] = [[3, 0, 0], [3, 0, 0], [0, 0, 0]] + chunk_01_15[:, :, 0] = [[0, 0, 0], [0, 0, 0], [6, 0, 0]] + chunk_11_05[:, :, 0] = [[0, 2, 2], [0, 0, 0], [0, 0, 0]] + chunk_11_10[:, :, 0] = [[0, 0, 0], [5, 0, 0], [5, 0, 0]] + chunk_11_15[:, :, 0] = [[6, 0, 0], [0, 0, 0], [0, 0, 0]] + + expected_images = [ + (0, 0, chunk_00_05, 5), + (0, 1, chunk_01_05, 5), + (1, 1, chunk_11_05, 5), + (0, 0, chunk_00_10, 10), + (0, 1, chunk_01_10, 10), + (1, 0, chunk_10_10, 10), + (1, 1, chunk_11_10, 10), + (0, 0, chunk_00_15, 15), + (0, 1, chunk_01_15, 15), + (1, 0, 
chunk_10_15, 15), + (1, 1, chunk_11_15, 15), + ] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_full_binary_with_chunks(self): + chunk_size = 3 + image_type = ImageType.FULL_BINARY + + chunk_00 = np.array( + [ + [[[[1, 1, 0], [1, 1, 0], [0, 0, 0]]], [[[0, 0, 0], [1, 0, 0], [0, 0, 0]]]], + [[[[0, 0, 1], [0, 0, 1], [1, 1, 0]]], [[[0, 0, 0], [0, 0, 1], [0, 0, 0]]]], + [[[[0, 0, 0], [0, 0, 0], [0, 1, 1]]], [[[0, 0, 0], [0, 0, 0], [0, 1, 0]]]], + ] + ) + chunk_10 = np.array( + [ + [[[[0, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]], + [[[[1, 1, 0], [0, 0, 1], [0, 0, 1]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]]], + [[[[0, 1, 1], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]], + ] + ) + chunk_01 = np.array( + [ + [[[[0, 0, 0], [0, 0, 0], [0, 1, 1]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]]], + [[[[1, 0, 0], [1, 0, 0], [0, 0, 0]]], [[[1, 0, 0], [1, 0, 0], [0, 0, 0]]]], + [[[[0, 0, 0], [0, 0, 0], [1, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [1, 0, 0]]]], + ] + ) + chunk_11 = np.array( + [ + [[[[0, 1, 1], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 1], [0, 0, 0], [0, 0, 0]]]], + [[[[0, 0, 0], [1, 0, 0], [1, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [1, 0, 0]]]], + [[[[1, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[1, 0, 0], [0, 0, 0], [0, 0, 0]]]], + ] + ) + + expected_images = [ + (0, 0, chunk_00, None), + (0, 1, chunk_01, None), + (1, 0, chunk_10, None), + (1, 1, chunk_11, None), + ] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in 
zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertIsNone(chunk[3]) + + def test_convert_to_images_full_binary_by_frame_with_chunks(self): + chunk_size = 3 + image_type = ImageType.FULL_BINARY_BY_FRAME + + chunk_00_05 = np.array( + [[[[[1, 1, 0], [1, 1, 0], [0, 0, 0]]], [[[0, 0, 0], [1, 0, 0], [0, 0, 0]]]]] + ) + chunk_00_10 = np.array( + [[[[[0, 0, 1], [0, 0, 1], [1, 1, 0]]], [[[0, 0, 0], [0, 0, 1], [0, 0, 0]]]]] + ) + chunk_00_15 = np.array( + [[[[[0, 0, 0], [0, 0, 0], [0, 1, 1]]], [[[0, 0, 0], [0, 0, 0], [0, 1, 0]]]]] + ) + chunk_10_10 = np.array( + [[[[[1, 1, 0], [0, 0, 1], [0, 0, 1]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]]]] + ) + chunk_10_15 = np.array( + [[[[[0, 1, 1], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]]] + ) + chunk_01_05 = np.array( + [[[[[0, 0, 0], [0, 0, 0], [0, 1, 1]]], [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]]]] + ) + chunk_01_10 = np.array( + [[[[[1, 0, 0], [1, 0, 0], [0, 0, 0]]], [[[1, 0, 0], [1, 0, 0], [0, 0, 0]]]]] + ) + chunk_01_15 = np.array( + [[[[[0, 0, 0], [0, 0, 0], [1, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [1, 0, 0]]]]] + ) + chunk_11_05 = np.array( + [[[[[0, 1, 1], [0, 0, 0], [0, 0, 0]]], [[[0, 0, 1], [0, 0, 0], [0, 0, 0]]]]] + ) + chunk_11_10 = np.array( + [[[[[0, 0, 0], [1, 0, 0], [1, 0, 0]]], [[[0, 0, 0], [0, 0, 0], [1, 0, 0]]]]] + ) + chunk_11_15 = np.array( + [[[[[1, 0, 0], [0, 0, 0], [0, 0, 0]]], [[[1, 0, 0], [0, 0, 0], [0, 0, 0]]]]] + ) + + expected_images = [ + (0, 0, chunk_00_05, 5), + (0, 1, chunk_01_05, 5), + (1, 1, chunk_11_05, 5), + (0, 0, chunk_00_10, 10), + (0, 1, chunk_01_10, 10), + (1, 0, chunk_10_10, 10), + (1, 1, chunk_11_10, 10), + (0, 0, chunk_00_15, 15), + (0, 1, chunk_01_15, 15), + (1, 0, chunk_10_15, 15), + (1, 1, chunk_11_15, 15), + ] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + 
chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + def test_convert_to_images_flat_binary_by_frame_with_chunks(self): + chunk_size = 3 + image_type = ImageType.FLAT_BINARY_BY_FRAME + + chunk_00_05 = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]]) + chunk_00_10 = np.array([[0, 0, 1], [0, 0, 1], [1, 1, 0]]) + chunk_00_15 = np.array([[0, 0, 0], [0, 0, 0], [0, 1, 1]]) + chunk_10_10 = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]]) + chunk_10_15 = np.array([[0, 1, 1], [0, 0, 0], [0, 0, 0]]) + chunk_01_05 = np.array([[0, 0, 0], [0, 0, 0], [0, 1, 1]]) + chunk_01_10 = np.array([[1, 0, 0], [1, 0, 0], [0, 0, 0]]) + chunk_01_15 = np.array([[0, 0, 0], [0, 0, 0], [1, 0, 0]]) + chunk_11_05 = np.array([[0, 1, 1], [0, 0, 0], [0, 0, 0]]) + chunk_11_10 = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0]]) + chunk_11_15 = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + expected_images = [ + (0, 0, chunk_00_05, 5), + (0, 1, chunk_01_05, 5), + (1, 1, chunk_11_05, 5), + (0, 0, chunk_00_10, 10), + (0, 1, chunk_01_10, 10), + (1, 0, chunk_10_10, 10), + (1, 1, chunk_11_10, 10), + (0, 0, chunk_00_15, 15), + (0, 1, chunk_01_15, 15), + (1, 0, chunk_10_15, 15), + (1, 1, chunk_11_15, 15), + ] + + images = convert_to_images( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + chunk_size, + image_type, + ) + + for expected_chunk, chunk in zip(expected_images, images): + self.assertEqual(expected_chunk[0], chunk[0]) + self.assertEqual(expected_chunk[1], chunk[1]) + self.assertTrue(np.array_equal(expected_chunk[2], chunk[2])) + self.assertEqual(expected_chunk[3], chunk[3]) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/arcade_collection/convert/test_convert_to_meshes.py 
b/tests/arcade_collection/convert/test_convert_to_meshes.py new file mode 100644 index 0000000..087a4bc --- /dev/null +++ b/tests/arcade_collection/convert/test_convert_to_meshes.py @@ -0,0 +1,432 @@ +import sys +import unittest +from unittest import mock + +import numpy as np +import pandas as pd + +from arcade_collection.convert.convert_to_meshes import MAX_ARRAY_LEVEL, MeshType, convert_to_meshes + +from .utilities import build_tar_instance + + +def mock_marching_cubes(array, **_): + array[array != MAX_ARRAY_LEVEL] = 0 + voxels = list(zip(*np.nonzero(array))) + vertices = np.array(voxels, dtype="float") + faces = np.reshape(range(len(voxels) * 3), (-1, 3)) + normals = np.array(voxels, dtype="float") + return vertices, faces, normals, None + + +class TestConvertToMeshes(unittest.TestCase): + def setUp(self): + self.series_key = "SERIES_KEY" + self.frame_spec = (5, 16, 5) + self.regions = ["DEFAULT", "REGION"] + self.box = (6, 6, 3) + + contents = { + f"{self.series_key}_000005.LOCATIONS.json": [ + { + "id": 1, + "location": [ + {"region": "DEFAULT", "voxels": [[1, 1, 1], [1, 2, 1], [2, 2, 1]]}, + {"region": "REGION", "voxels": [[2, 1, 1]]}, + ], + }, + { + "id": 2, + "location": [ + {"region": "DEFAULT", "voxels": [[1, 1, 1], [1, 2, 1]]}, + {"region": "REGION", "voxels": [[2, 1, 1], [2, 2, 1]]}, + ], + }, + ], + f"{self.series_key}_000010.LOCATIONS.json": [ + { + "id": 3, + "location": [ + {"region": "DEFAULT", "voxels": []}, + { + "region": "REGION", + "voxels": [[1, 1, 1], [1, 2, 1], [2, 1, 1], [2, 2, 1]], + }, + ], + }, + ], + f"{self.series_key}_000015.LOCATIONS.json": [ + { + "id": 4, + "location": [ + {"region": "DEFAULT", "voxels": []}, + {"region": "REGION", "voxels": []}, + ], + }, + ], + } + + self.locations_tar = build_tar_instance(contents) + + @mock.patch.object( + sys.modules["arcade_collection.convert.convert_to_meshes"], + "measure", + return_value=mock.Mock(), + ) + def test_convert_to_meshes_default_no_group(self, measure_mock): + mesh_type = 
MeshType.DEFAULT + group_size = None + categories = None + + measure_mock.marching_cubes.side_effect = mock_marching_cubes + + obj_05_1_default = ( + "v -0.5 -0.5 0.0\n" + "v -0.5 0.5 0.0\n" + "v 0.5 -0.5 0.0\n" + "v 0.5 0.5 0.0\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n" + "f 6//6 5//5 4//4\n" + "f 9//9 8//8 7//7\n" + "f 12//12 11//11 10//10\n" + ) + obj_05_1_region = "v 0.0 0.0 0.0\nvn 2.0 4.0 1.0\nf 3//3 2//2 1//1\n" + obj_05_2_default = obj_05_1_default + obj_05_2_region = ( + "v 0.0 -0.5 0.0\n" + "v 0.0 0.5 0.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n" + "f 6//6 5//5 4//4\n" + ) + obj_10_3_default = obj_05_1_default + obj_10_3_region = obj_05_1_default + + expected_meshes = [ + (5, 1, "DEFAULT", obj_05_1_default), + (5, 2, "DEFAULT", obj_05_2_default), + (5, 1, "REGION", obj_05_1_region), + (5, 2, "REGION", obj_05_2_region), + (10, 3, "DEFAULT", obj_10_3_default), + (10, 3, "REGION", obj_10_3_region), + ] + + meshes = convert_to_meshes( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + mesh_type, + group_size, + categories, + ) + + self.assertCountEqual(expected_meshes, meshes) + + @mock.patch.object( + sys.modules["arcade_collection.convert.convert_to_meshes"], + "measure", + return_value=mock.Mock(), + ) + def test_convert_to_meshes_all_mesh_type_no_group(self, measure_mock): + mesh_type = MeshType.INVERTED + group_size = None + categories = None + + measure_mock.marching_cubes.side_effect = mock_marching_cubes + + obj_05_1_default = ( + "v -0.5 -0.5 0.0\n" + "v -0.5 0.5 0.0\n" + "v 0.5 -0.5 0.0\n" + "v 0.5 0.5 0.0\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 1//1 2//2 3//3\n" + "f 4//4 5//5 6//6\n" + "f 7//7 8//8 9//9\n" + "f 10//10 11//11 12//12\n" + ) + obj_05_1_region = "v 0.0 0.0 0.0\nvn 2.0 4.0 1.0\nf 1//1 2//2 3//3\n" + obj_05_2_default = obj_05_1_default + obj_05_2_region = 
( + "v 0.0 -0.5 0.0\n" + "v 0.0 0.5 0.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 1//1 2//2 3//3\n" + "f 4//4 5//5 6//6\n" + ) + obj_10_3_default = obj_05_1_default + obj_10_3_region = obj_05_1_default + + expected_meshes = [ + (5, 1, "DEFAULT", obj_05_1_default), + (5, 2, "DEFAULT", obj_05_2_default), + (5, 1, "REGION", obj_05_1_region), + (5, 2, "REGION", obj_05_2_region), + (10, 3, "DEFAULT", obj_10_3_default), + (10, 3, "REGION", obj_10_3_region), + ] + + meshes = convert_to_meshes( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + mesh_type, + group_size, + categories, + ) + + self.assertCountEqual(expected_meshes, meshes) + + @mock.patch.object( + sys.modules["arcade_collection.convert.convert_to_meshes"], + "measure", + return_value=mock.Mock(), + ) + def test_convert_to_meshes_region_mesh_type_no_group(self, measure_mock): + mesh_type = {"DEFAULT": MeshType.DEFAULT, "REGION": MeshType.INVERTED} + group_size = None + categories = None + + measure_mock.marching_cubes.side_effect = mock_marching_cubes + + obj_05_1_default = ( + "v -0.5 -0.5 0.0\n" + "v -0.5 0.5 0.0\n" + "v 0.5 -0.5 0.0\n" + "v 0.5 0.5 0.0\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n" + "f 6//6 5//5 4//4\n" + "f 9//9 8//8 7//7\n" + "f 12//12 11//11 10//10\n" + ) + obj_05_1_region = "v 0.0 0.0 0.0\nvn 2.0 4.0 1.0\nf 1//1 2//2 3//3\n" + obj_05_2_default = obj_05_1_default + obj_05_2_region = ( + "v 0.0 -0.5 0.0\n" + "v 0.0 0.5 0.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 1//1 2//2 3//3\n" + "f 4//4 5//5 6//6\n" + ) + obj_10_3_default = obj_05_1_default + obj_10_3_region = ( + "v -0.5 -0.5 0.0\n" + "v -0.5 0.5 0.0\n" + "v 0.5 -0.5 0.0\n" + "v 0.5 0.5 0.0\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 1//1 2//2 3//3\n" + "f 4//4 5//5 6//6\n" + "f 7//7 8//8 9//9\n" + "f 10//10 11//11 12//12\n" + ) + + expected_meshes = [ + (5, 1, 
"DEFAULT", obj_05_1_default), + (5, 2, "DEFAULT", obj_05_2_default), + (5, 1, "REGION", obj_05_1_region), + (5, 2, "REGION", obj_05_2_region), + (10, 3, "DEFAULT", obj_10_3_default), + (10, 3, "REGION", obj_10_3_region), + ] + + meshes = convert_to_meshes( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + mesh_type, + group_size, + categories, + ) + + self.assertCountEqual(expected_meshes, meshes) + + @mock.patch.object( + sys.modules["arcade_collection.convert.convert_to_meshes"], + "measure", + return_value=mock.Mock(), + ) + def test_convert_to_meshes_default_mesh_type_with_group_same_category(self, measure_mock): + mesh_type = MeshType.DEFAULT + group_size = 2 + categories = pd.DataFrame( + {"FRAME": [5, 5, 10, 15], "CATEGORY": ["A", "A", "A", "A"], "ID": [1, 2, 3, 4]} + ) + + measure_mock.marching_cubes.side_effect = mock_marching_cubes + + obj_05_0_default = ( + "v -2.0 0.0 -0.5\n" + "v -2.0 1.0 -0.5\n" + "v -1.0 0.0 -0.5\n" + "v -1.0 1.0 -0.5\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n" + "f 6//6 5//5 4//4\n" + "f 9//9 8//8 7//7\n" + "f 12//12 11//11 10//10\n\n" + "v -2.0 0.0 -0.5\n" + "v -2.0 1.0 -0.5\n" + "v -1.0 0.0 -0.5\n" + "v -1.0 1.0 -0.5\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 7//7 6//6 5//5\n" + "f 10//10 9//9 8//8\n" + "f 13//13 12//12 11//11\n" + "f 16//16 15//15 14//14\n" + ) + obj_05_0_region = ( + "v -1.0 1.0 -0.5\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n\n" + "v -1.0 0.0 -0.5\n" + "v -1.0 1.0 -0.5\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 4//4 3//3 2//2\n" + "f 7//7 6//6 5//5\n" + ) + obj_10_0_default = ( + "v -2.0 0.0 -0.5\n" + "v -2.0 1.0 -0.5\n" + "v -1.0 0.0 -0.5\n" + "v -1.0 1.0 -0.5\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n" + "f 6//6 5//5 4//4\n" + "f 9//9 8//8 7//7\n" + "f 12//12 11//11 10//10\n" + ) + 
obj_10_0_region = obj_10_0_default + + expected_meshes = [ + (5, 0, "DEFAULT", obj_05_0_default), + (5, 0, "REGION", obj_05_0_region), + (10, 0, "DEFAULT", obj_10_0_default), + (10, 0, "REGION", obj_10_0_region), + ] + + meshes = convert_to_meshes( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + mesh_type, + group_size, + categories, + ) + + self.assertCountEqual(expected_meshes, meshes) + + @mock.patch.object( + sys.modules["arcade_collection.convert.convert_to_meshes"], + "measure", + return_value=mock.Mock(), + ) + def test_convert_to_meshes_default_mesh_type_with_group_different_category(self, measure_mock): + mesh_type = MeshType.DEFAULT + group_size = 2 + categories = pd.DataFrame( + {"FRAME": [5, 5, 10, 15], "CATEGORY": ["A", "B", "A", "A"], "ID": [1, 2, 3, 4]} + ) + + measure_mock.marching_cubes.side_effect = mock_marching_cubes + + obj_05_0_default = ( + "v -2.0 0.0 -0.5\n" + "v -2.0 1.0 -0.5\n" + "v -1.0 0.0 -0.5\n" + "v -1.0 1.0 -0.5\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n" + "f 6//6 5//5 4//4\n" + "f 9//9 8//8 7//7\n" + "f 12//12 11//11 10//10\n" + ) + obj_05_0_region = "v -1.0 1.0 -0.5\nvn 2.0 4.0 1.0\nf 3//3 2//2 1//1\n" + obj_05_1_default = obj_05_0_default + obj_05_1_region = ( + "v -1.0 0.0 -0.5\n" + "v -1.0 1.0 -0.5\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n" + "f 6//6 5//5 4//4\n" + ) + obj_10_0_default = ( + "v -2.0 0.0 -0.5\n" + "v -2.0 1.0 -0.5\n" + "v -1.0 0.0 -0.5\n" + "v -1.0 1.0 -0.5\n" + "vn 1.0 3.0 1.0\n" + "vn 1.0 4.0 1.0\n" + "vn 2.0 3.0 1.0\n" + "vn 2.0 4.0 1.0\n" + "f 3//3 2//2 1//1\n" + "f 6//6 5//5 4//4\n" + "f 9//9 8//8 7//7\n" + "f 12//12 11//11 10//10\n" + ) + obj_10_0_region = obj_10_0_default + + expected_meshes = [ + (5, 0, "DEFAULT", obj_05_0_default), + (5, 1, "DEFAULT", obj_05_1_default), + (5, 0, "REGION", obj_05_0_region), + (5, 1, "REGION", obj_05_1_region), + (10, 0, "DEFAULT", 
obj_10_0_default), + (10, 0, "REGION", obj_10_0_region), + ] + + meshes = convert_to_meshes( + self.series_key, + self.locations_tar, + self.frame_spec, + self.regions, + self.box, + mesh_type, + group_size, + categories, + ) + + self.assertCountEqual(expected_meshes, meshes) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/arcade_collection/convert/test_convert_to_simularium.py b/tests/arcade_collection/convert/test_convert_to_simularium.py new file mode 100644 index 0000000..b2a8429 --- /dev/null +++ b/tests/arcade_collection/convert/test_convert_to_simularium.py @@ -0,0 +1,386 @@ +import json +import sys +import unittest +from unittest import mock + +import numpy as np +import pandas as pd +from simulariumio import DISPLAY_TYPE +from simulariumio.constants import DEFAULT_CAMERA_SETTINGS, VALUES_PER_3D_POINT, VIZ_TYPE + +from arcade_collection.convert.convert_to_simularium import ( + CAMERA_LOOK_AT, + CAMERA_POSITIONS, + convert_to_simularium, + get_agent_data, + get_display_data, + get_meta_data, + shade_color, +) + + +class TestConvertToSimularium(unittest.TestCase): + def test_convert_to_simularium(self): + names = ["X#A#1", "X#B#2", "X#A", "X#A#3#5", "X#A#4", "X#A#5#10", "X#B#6"] + display_types = ["FIBER", "FIBER", "FIBER", "OBJ", "SPHERE", "OBJ", "SPHERE"] + + series_key = "SERIES_KEY" + simulation_type = "simulation_type" + data = pd.DataFrame( + { + "frame": [0, 0, 5, 5, 5, 10, 10], + "name": names, + "points": [[0, 1, 2], [3, 4, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5], [], [], [], []], + "display": display_types, + "radius": [1, 2, 3, 4, 5, 6, 7], + "x": [8, 9, 10, 11, 12, 13, 14], + "y": [15, 16, 17, 18, 19, 20, 21], + "z": [22, 23, 24, 25, 26, 27, 28], + } + ) + length = 10 + width = 20 + height = 30 + ds = (2, 3, 4) + dt = 5 + colors = {"A": "#ff0000", "B": "#0000ff"} + url = "https://url" + jitter = 0.0 + + total_steps = 3 + time_interval = 5 + time_step_size = dt * time_interval + size = {"x": length * ds[0], "y": width * ds[1], "z": 
height * ds[2]} + camera_position = { + "x": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[0], + "y": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[1], + "z": DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION[2], + } + camera_look_at = { + "x": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[0], + "y": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[1], + "z": DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION[2], + } + type_mapping_entries = [ + (names[0], display_types[0], colors["A"], None), + (names[1], display_types[1], colors["B"], None), + (names[2], display_types[2], colors["A"], None), + (names[3], display_types[3], colors["A"], f"{url}/000005_X_003.MESH.obj"), + (names[4], display_types[4], colors["A"], None), + (names[5], display_types[5], colors["A"], f"{url}/000010_X_005.MESH.obj"), + (names[6], display_types[6], colors["B"], None), + ] + type_mapping = { + str(i): ( + { + "name": name, + "geometry": {"displayType": display_type, "url": url, "color": color}, + } + if url is not None + else { + "name": name, + "geometry": {"displayType": display_type, "color": color}, + } + ) + for i, (name, display_type, color, url) in enumerate(type_mapping_entries) + } + title = f"ARCADE - {series_key}" + model_info = { + "title": "ARCADE", + "version": simulation_type, + "description": f"Agent-based modeling framework ARCADE for {series_key}.", + } + + def make_bundle_data(viz_type, object_id, display_index, x0, y0, z0, radius, subpoints): + rotation = (0, 0, 0) + x = x0 * ds[0] - length * ds[0] / 2 + y = width * ds[1] / 2 - y0 * ds[1] + z = z0 * ds[2] - height * ds[2] / 2 + + subpoints_array = np.reshape(subpoints, (-1, 3)) + subpoints_array[:, 0] *= ds[0] + subpoints_array[:, 1] *= -ds[1] + subpoints_array[:, 2] *= ds[2] + + return [ + viz_type, + object_id, + display_index, + x, + y, + z, + *rotation, + radius, + len(subpoints), + *subpoints_array.ravel().tolist(), + ] + + bundle_data = [ + { + "frameNumber": 0, + "time": 0.0 * time_step_size, + "data": [ + *make_bundle_data(VIZ_TYPE.FIBER, 0, 0, 8, 15, 22, 
1, [0, 1, 2]), + *make_bundle_data(VIZ_TYPE.FIBER, 1, 1, 9, 16, 23, 2, [3, 4, 5, 6, 7, 8]), + ], + }, + { + "frameNumber": 1, + "time": 1.0 * time_step_size, + "data": [ + *make_bundle_data(VIZ_TYPE.FIBER, 0, 2, 10, 17, 24, 3, [0, 1, 2, 3, 4, 5]), + *make_bundle_data(VIZ_TYPE.DEFAULT, 1, 3, 11, 18, 25, 4, []), + *make_bundle_data(VIZ_TYPE.DEFAULT, 2, 4, 12, 19, 26, 5, []), + ], + }, + { + "frameNumber": 2, + "time": 2.0 * time_step_size, + "data": [ + *make_bundle_data(VIZ_TYPE.DEFAULT, 0, 5, 13, 20, 27, 6, []), + *make_bundle_data(VIZ_TYPE.DEFAULT, 1, 6, 14, 21, 28, 7, []), + ], + }, + ] + + simularium = json.loads( + convert_to_simularium( + series_key, + simulation_type, + data, + length, + width, + height, + ds, + dt, + colors, + url, + jitter, + ) + ) + + trajectory_info = simularium["trajectoryInfo"] + + self.assertEqual(total_steps, trajectory_info["totalSteps"]) + self.assertEqual(time_step_size, trajectory_info["timeStepSize"]) + self.assertDictEqual(size, trajectory_info["size"]) + self.assertDictEqual(camera_position, trajectory_info["cameraDefault"]["position"]) + self.assertDictEqual(camera_look_at, trajectory_info["cameraDefault"]["lookAtPosition"]) + self.assertDictEqual(type_mapping, trajectory_info["typeMapping"]) + self.assertEqual(title, trajectory_info["trajectoryTitle"]) + self.assertDictEqual(model_info, trajectory_info["modelInfo"]) + self.assertListEqual(bundle_data, simularium["spatialData"]["bundleData"]) + + def test_get_meta_data_simulation_type_with_defaults(self): + series_key = "SERIES_KEY" + length = 10 + width = 20 + height = 30 + dx = 2 + dy = 3 + dz = 4 + + parameters = ["patch", "potts"] + + expected_box_size = np.array([length * dx, width * dy, height * dz]) + expected_trajectory_title = f"ARCADE - {series_key}" + expected_title = "ARCADE" + expected_description = f"Agent-based modeling framework ARCADE for {series_key}." 
+ + for simulation_type in parameters: + with self.subTest(simulation_type=simulation_type): + expected_version = simulation_type + expected_position = np.array(CAMERA_POSITIONS[simulation_type]) + expected_look_at = np.array(CAMERA_LOOK_AT[simulation_type]) + + meta_data = get_meta_data( + series_key, simulation_type, length, width, height, dx, dy, dz + ) + + self.assertTrue((expected_box_size == meta_data.box_size).all()) + self.assertTrue((expected_position == meta_data.camera_defaults.position).all()) + self.assertTrue( + (expected_look_at == meta_data.camera_defaults.look_at_position).all() + ) + self.assertEqual(expected_trajectory_title, meta_data.trajectory_title) + self.assertEqual(expected_title, meta_data.model_meta_data.title) + self.assertEqual(expected_version, meta_data.model_meta_data.version) + self.assertEqual(expected_description, meta_data.model_meta_data.description) + + def test_get_meta_data_simulation_type_without_defaults(self): + series_key = "SERIES_KEY" + simulation_type = "simulation_type" + length = 10 + width = 20 + height = 30 + dx = 2 + dy = 3 + dz = 4 + + expected_box_size = np.array([length * dx, width * dy, height * dz]) + expected_trajectory_title = f"ARCADE - {series_key}" + expected_title = "ARCADE" + expected_description = f"Agent-based modeling framework ARCADE for {series_key}." 
+ expected_version = simulation_type + expected_position = DEFAULT_CAMERA_SETTINGS.CAMERA_POSITION + expected_look_at = DEFAULT_CAMERA_SETTINGS.LOOK_AT_POSITION + + meta_data = get_meta_data(series_key, simulation_type, length, width, height, dx, dy, dz) + + self.assertTrue((expected_box_size == meta_data.box_size).all()) + self.assertTrue((expected_position == meta_data.camera_defaults.position).all()) + self.assertTrue((expected_look_at == meta_data.camera_defaults.look_at_position).all()) + self.assertEqual(expected_trajectory_title, meta_data.trajectory_title) + self.assertEqual(expected_title, meta_data.model_meta_data.title) + self.assertEqual(expected_version, meta_data.model_meta_data.version) + self.assertEqual(expected_description, meta_data.model_meta_data.description) + + def test_get_agent_data_no_subpoints(self): + data = pd.DataFrame( + { + "frame": [0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2], + "name": ["A", "B", "C", "D", "A", "B", "C", "A", "B", "C", "D", "E"], + "points": [[], [], [], [], [], [], [], [], [], [], [], []], + } + ) + + total_steps = 3 + max_agents = 5 + max_subpoints = 0 + + agent_data = get_agent_data(data) + + self.assertEqual((total_steps,), agent_data.times.shape) + self.assertEqual((total_steps,), agent_data.n_agents.shape) + self.assertEqual((total_steps, max_agents), agent_data.viz_types.shape) + self.assertEqual((total_steps, max_agents), agent_data.unique_ids.shape) + self.assertEqual(total_steps, len(agent_data.types)) + self.assertEqual((total_steps, max_agents, VALUES_PER_3D_POINT), agent_data.positions.shape) + self.assertEqual((total_steps, max_agents), agent_data.radii.shape) + self.assertEqual((total_steps, max_agents, VALUES_PER_3D_POINT), agent_data.rotations.shape) + self.assertEqual((total_steps, max_agents), agent_data.n_subpoints.shape) + self.assertEqual((total_steps, max_agents, max_subpoints), agent_data.subpoints.shape) + + def test_get_agent_data_with_subpoints(self): + data = pd.DataFrame( + { + "frame": [0, 0, 
0, 0, 1, 1, 1, 2, 2, 2, 2, 2], + "name": ["A", "B", "C", "D", "A", "B", "C", "A", "B", "C", "D", "E"], + "points": [[0, 1, 2], [], [], [], [], [0, 1, 2, 3, 4, 5], [], [], [], [], [], []], + } + ) + + total_steps = 3 + max_agents = 5 + max_subpoints = 6 + + agent_data = get_agent_data(data) + + self.assertEqual((total_steps,), agent_data.times.shape) + self.assertEqual((total_steps,), agent_data.n_agents.shape) + self.assertEqual((total_steps, max_agents), agent_data.viz_types.shape) + self.assertEqual((total_steps, max_agents), agent_data.unique_ids.shape) + self.assertEqual(total_steps, len(agent_data.types)) + self.assertEqual((total_steps, max_agents, VALUES_PER_3D_POINT), agent_data.positions.shape) + self.assertEqual((total_steps, max_agents), agent_data.radii.shape) + self.assertEqual((total_steps, max_agents, VALUES_PER_3D_POINT), agent_data.rotations.shape) + self.assertEqual((total_steps, max_agents), agent_data.n_subpoints.shape) + self.assertEqual((total_steps, max_agents, max_subpoints), agent_data.subpoints.shape) + + def test_get_display_data_no_url(self): + data = pd.DataFrame( + { + "name": ["GROUP#A", "GROUP#A#4", "GROUP#B#3", "GROUP#A#2", "GROUP#B#1"], + "display": ["SPHERE", "SPHERE", "FIBER", "SPHERE", "FIBER"], + } + ) + colors = {"A": "#ff0000", "B": "#0000ff"} + + expected_data = [ + ("GROUP#A", "GROUP", colors["A"], DISPLAY_TYPE.SPHERE, ""), + ("GROUP#A#2", "2", colors["A"], DISPLAY_TYPE.SPHERE, ""), + ("GROUP#A#4", "4", colors["A"], DISPLAY_TYPE.SPHERE, ""), + ("GROUP#B#1", "1", colors["B"], DISPLAY_TYPE.FIBER, ""), + ("GROUP#B#3", "3", colors["B"], DISPLAY_TYPE.FIBER, ""), + ] + + display_data = get_display_data(data, colors, url="", jitter=0.0) + + for expected, (key, display) in zip(expected_data, display_data.items()): + self.assertTupleEqual( + expected, (key, display.name, display.color, display.display_type, display.url) + ) + + def test_get_display_data_with_url(self): + url = "https://url/" + data = pd.DataFrame( + { + "name": 
["GROUP#A#3#1", "GROUP#A#2#1", "GROUP#B#1#1", "GROUP#A#2#0", "GROUP#B#1#0"], + "display": ["OBJ", "OBJ", "SPHERE", "OBJ", "OBJ"], + } + ) + colors = {"A": "#ff0000", "B": "#0000ff"} + + expected_data = [ + ("GROUP#A#2#0", "2", colors["A"], DISPLAY_TYPE.OBJ, f"{url}/000000_GROUP_002.MESH.obj"), + ("GROUP#A#2#1", "2", colors["A"], DISPLAY_TYPE.OBJ, f"{url}/000001_GROUP_002.MESH.obj"), + ("GROUP#A#3#1", "3", colors["A"], DISPLAY_TYPE.OBJ, f"{url}/000001_GROUP_003.MESH.obj"), + ("GROUP#B#1#0", "1", colors["B"], DISPLAY_TYPE.OBJ, f"{url}/000000_GROUP_001.MESH.obj"), + ("GROUP#B#1#1", "1", colors["B"], DISPLAY_TYPE.SPHERE, ""), + ] + + display_data = get_display_data(data, colors, url=url, jitter=0.0) + + for expected, (key, display) in zip(expected_data, display_data.items()): + self.assertTupleEqual( + expected, (key, display.name, display.color, display.display_type, display.url) + ) + + @mock.patch.object( + sys.modules["arcade_collection.convert.convert_to_simularium"], + "random", + return_value=mock.Mock(), + ) + def test_get_display_data_with_jitter(self, random_mock): + random_mock.random.side_effect = [0.1, 0.3, 0.7, 0.9] + + data = pd.DataFrame( + { + "name": ["GROUP#A#1", "GROUP#A#2", "GROUP#A#3", "GROUP#A#4"], + "display": ["SPHERE", "SPHERE", "SPHERE", "SPHERE"], + } + ) + jitter = 0.5 + color = "#ff55ee" + colors = {"A": color} + + expected_colors = [ + shade_color(color, -0.2 * jitter), + shade_color(color, -0.1 * jitter), + shade_color(color, 0.1 * jitter), + shade_color(color, 0.2 * jitter), + ] + + display_data = get_display_data(data, colors, url="", jitter=jitter) + + for expected_color, display in zip(expected_colors, display_data.values()): + self.assertEqual(expected_color, display.color) + + def test_shade_color(self): + original_color = "#F0F00F" + parameters = [ + (0.0, "#F0F00F"), # unchanged + (-1.0, "#000000"), # full shade to black + (1.0, "#FFFFFF"), # full shade to white + (-0.5, "#787808"), # half shade to black + (0.5, "#F8F887"), # 
half shade to white + ] + + for alpha, expected_color in parameters: + with self.subTest(alpha=alpha): + color = shade_color(original_color, alpha) + self.assertEqual(expected_color.lower(), color.lower()) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/arcade_collection/convert/test_convert_to_simularium_objects.py b/tests/arcade_collection/convert/test_convert_to_simularium_objects.py index 66da0bc..d0db8e5 100644 --- a/tests/arcade_collection/convert/test_convert_to_simularium_objects.py +++ b/tests/arcade_collection/convert/test_convert_to_simularium_objects.py @@ -1,14 +1,160 @@ +import json import unittest +import numpy as np +import pandas as pd +from simulariumio.constants import VIZ_TYPE + +from arcade_collection.convert.convert_to_simularium import CAMERA_LOOK_AT, CAMERA_POSITIONS from arcade_collection.convert.convert_to_simularium_objects import convert_to_simularium_objects class TestConvertToSimulariumObjects(unittest.TestCase): - def test_convert_to_simularium_objects_invalid_type_throws_exception(self) -> None: + def test_convert_to_simularium_objects_potts(self): + series_key = "SERIES_KEY" + simulation_type = "potts" + categories = pd.DataFrame( + { + "ID": [1, 3, 4, 1, 3, 4, 1, 4], + "FRAME": [0, 0, 0, 5, 5, 5, 10, 10], + "CATEGORY": ["A", "A", "A", "A", "B", "B", "B", "B"], + } + ) + frame_spec = (0, 11, 5) + regions = ["REGION_A", "REGION_B"] + box = (10, 12, 14) + ds = (2, 3, 4) + dt = 10 + colors = {"A": "#ff0000", "B": "#0000ff"} + group_size = 2 + url = "URL" + jitter = 0 + + start_time, end_time, time_interval = frame_spec + total_steps = len(np.arange(start_time, end_time, time_interval)) + time_step_size = dt * time_interval + size = {"x": box[0] * ds[0], "y": box[1] * ds[1], "z": box[2] * ds[2]} + camera_position = { + "x": CAMERA_POSITIONS[simulation_type][0], + "y": CAMERA_POSITIONS[simulation_type][1], + "z": CAMERA_POSITIONS[simulation_type][2], + } + camera_look_at = { + "x": CAMERA_LOOK_AT[simulation_type][0], 
+ "y": CAMERA_LOOK_AT[simulation_type][1], + "z": CAMERA_LOOK_AT[simulation_type][2], + } + type_mapping_entries = [ + ("REGION_A", "A", 0, 0), + ("REGION_B", "A", 0, 0), + ("REGION_A", "A", 1, 0), + ("REGION_B", "A", 1, 0), + ("REGION_A", "A", 0, 5), + ("REGION_B", "A", 0, 5), + ("REGION_A", "B", 1, 5), + ("REGION_B", "B", 1, 5), + ("REGION_A", "B", 0, 10), + ("REGION_B", "B", 0, 10), + ] + type_mapping = { + str(i): { + "name": f"{region}#{category}#{index}#{frame}", + "geometry": { + "displayType": "OBJ", + "url": f"{url}/{frame:06d}_{region}_{index:03d}.MESH.obj", + "color": colors[category], + }, + } + for i, (region, category, index, frame) in enumerate(type_mapping_entries) + } + title = f"ARCADE - {series_key}" + model_info = { + "title": "ARCADE", + "version": simulation_type, + "description": f"Agent-based modeling framework ARCADE for {series_key}.", + } + + def make_bundle_data(object_id, display_index): + position = (0, 0, 0) + rotation = (0, 0, 0) + radius = 1 + subpoints = 0 + + return [ + VIZ_TYPE.DEFAULT, + object_id, + display_index, + *position, + *rotation, + radius, + subpoints, + ] + + bundle_data = [ + { + "frameNumber": 0, + "time": 0.0 * time_step_size, + "data": [ + *make_bundle_data(0, 0), + *make_bundle_data(1, 1), + *make_bundle_data(2, 2), + *make_bundle_data(3, 3), + ], + }, + { + "frameNumber": 1, + "time": 1.0 * time_step_size, + "data": [ + *make_bundle_data(0, 4), + *make_bundle_data(1, 5), + *make_bundle_data(2, 6), + *make_bundle_data(3, 7), + ], + }, + { + "frameNumber": 2, + "time": 2.0 * time_step_size, + "data": [ + *make_bundle_data(0, 8), + *make_bundle_data(1, 9), + ], + }, + ] + + simularium = json.loads( + convert_to_simularium_objects( + series_key, + simulation_type, + categories, + regions, + frame_spec, + box, + ds, + dt, + colors, + group_size, + url, + jitter, + ) + ) + + trajectory_info = simularium["trajectoryInfo"] + + self.assertEqual(total_steps, trajectory_info["totalSteps"]) + 
self.assertEqual(time_step_size, trajectory_info["timeStepSize"]) + self.assertDictEqual(size, trajectory_info["size"]) + self.assertDictEqual(camera_position, trajectory_info["cameraDefault"]["position"]) + self.assertDictEqual(camera_look_at, trajectory_info["cameraDefault"]["lookAtPosition"]) + self.assertDictEqual(type_mapping, trajectory_info["typeMapping"]) + self.assertEqual(title, trajectory_info["trajectoryTitle"]) + self.assertDictEqual(model_info, trajectory_info["modelInfo"]) + self.assertListEqual(bundle_data, simularium["spatialData"]["bundleData"]) + + def test_convert_to_simularium_objects_invalid_type_throws_exception(self): with self.assertRaises(ValueError): simulation_type = "invalid_type" convert_to_simularium_objects( - "", simulation_type, None, (0, 0, 0), [], (0, 0, 0), 0, 0, 0, {}, 0, "" + "", simulation_type, None, [], (0, 0, 0), (0, 0, 0), (0, 0, 0), 0, {}, 0, "", 0 ) diff --git a/tests/arcade_collection/convert/test_convert_to_simularium_shapes.py b/tests/arcade_collection/convert/test_convert_to_simularium_shapes.py index f6fc1dd..3d949a5 100644 --- a/tests/arcade_collection/convert/test_convert_to_simularium_shapes.py +++ b/tests/arcade_collection/convert/test_convert_to_simularium_shapes.py @@ -1,13 +1,780 @@ +import json +import sys import unittest +from math import sqrt +from unittest import mock -from arcade_collection.convert.convert_to_simularium_shapes import convert_to_simularium_shapes +import numpy as np +from simulariumio.constants import VIZ_TYPE +from arcade_collection.convert.convert_to_simularium_shapes import ( + CELL_STATES, + EDGE_TYPES, + approximate_radius_from_voxels, + calculate_patch_size, + convert_hexagonal_to_rectangular_coordinates, + convert_to_simularium_shapes, +) -class TestConvertToSimularium(unittest.TestCase): - def test_convert_to_simularium_shapes_invalid_type_throws_exception(self) -> None: +from .utilities import build_tar_instance + + +class TestConvertToSimulariumShapes(unittest.TestCase): + def 
test_convert_to_simularium_shapes_patch_invalid_data(self): + simulation_type = "patch" + simularium = convert_to_simularium_shapes( + "", simulation_type, {"invalid": None}, (0, 0, 0), (0, 0, 0), (0, 0, 0), 0, {} + ) + self.assertEqual("", simularium) + + @mock.patch.object( + sys.modules["arcade_collection.convert.convert_to_simularium_shapes"], + "random", + return_value=mock.Mock(), + ) + def test_convert_to_simularium_shapes_patch_with_cells(self, random_mock): + series_key = "SERIES_KEY" + + contents = { + f"{series_key}.json": { + "timepoints": [ + { + "time": 0.0, + "cells": [ + [[0, 1, -1, 0], [[0, 1, 0, 4, 27, []]]], + [[0, 1, -1, 1], [[0, 0, 1, 3, 216, []]]], + ], + }, + { + "time": 5.0, + "cells": [ + [[1, 0, -1, 2], [[0, 1, 2, 2, 729, []]]], + [[-1, 0, 1, 3], [[0, 0, 3, 1, 1728, []]]], + ], + }, + { + "time": 10.0, + "cells": [ + [[0, 0, 0, 4], [[0, 1, 4, 0, 3375, []]]], + ], + }, + ] + } + } + + simulation_type = "patch" + data_tars = {"cells": build_tar_instance(contents)} + frame_spec = (0, 11, 5) + box = (2, 0, 0) + ds = (2, 3, 4) + dt = 10 + colors = { + CELL_STATES[0]: "#ff0000", + CELL_STATES[1]: "#00ff00", + CELL_STATES[2]: "#0000ff", + CELL_STATES[3]: "#ff00ff", + CELL_STATES[4]: "#00ffff", + } + resolution = 0 + jitter = 0 + + random_mock.randint.side_effect = [0, 1, 2, 3, 4] + + start_time, end_time, time_interval = frame_spec + total_steps = len(np.arange(start_time, end_time, time_interval)) + time_step_size = dt * time_interval + bounds, length, width = calculate_patch_size(box[0], box[1]) + size = {"x": length * ds[0], "y": width * ds[1], "z": box[2] * ds[2]} + type_mapping = { + str(i): { + "name": f"{population}#{state}#{u}{v}{w}{z}{p}", + "geometry": {"displayType": "SPHERE", "color": colors[state]}, + } + for i, (population, u, v, w, z, p, state) in enumerate( + [ + ("POPULATION1", 0, 1, -1, 0, 4, CELL_STATES[0]), + ("POPULATION0", 0, 1, -1, 1, 3, CELL_STATES[1]), + ("POPULATION1", 1, 0, -1, 2, 2, CELL_STATES[2]), + ("POPULATION0", 
-1, 0, 1, 3, 1, CELL_STATES[3]), + ("POPULATION1", 0, 0, 0, 4, 0, CELL_STATES[4]), + ] + ) + } + + def make_bundle_data(object_id, display_index, radius, uvw, z, offset): + rotation = (0, 0, 0) + subpoints = 0 + x, y = convert_hexagonal_to_rectangular_coordinates(uvw, bounds, offset) + + return [ + VIZ_TYPE.DEFAULT, + object_id, + display_index, + (x - length / 2.0) * ds[0], + (width / 2.0 - y) * ds[1], + z * ds[2], + *rotation, + radius, + subpoints, + ] + + bundle_data = [ + { + "frameNumber": 0, + "time": 0.0 * time_step_size, + "data": [ + *make_bundle_data(0, 0, 2, (0, 1, -1), 0, 4), + *make_bundle_data(1, 1, 4, (0, 1, -1), 1, 4), + ], + }, + { + "frameNumber": 1, + "time": 1.0 * time_step_size, + "data": [ + *make_bundle_data(0, 2, 6, (1, 0, -1), 2, 4), + *make_bundle_data(1, 3, 8, (-1, 0, 1), 3, 4), + ], + }, + { + "frameNumber": 2, + "time": 2.0 * time_step_size, + "data": [ + *make_bundle_data(0, 4, 10, (0, 0, 0), 4, 4), + ], + }, + ] + + simularium = json.loads( + convert_to_simularium_shapes( + series_key, + simulation_type, + data_tars, + frame_spec, + box, + ds, + dt, + colors, + resolution, + jitter, + ) + ) + + trajectory_info = simularium["trajectoryInfo"] + + self.assertEqual(total_steps, trajectory_info["totalSteps"]) + self.assertEqual(time_step_size, trajectory_info["timeStepSize"]) + self.assertDictEqual(size, trajectory_info["size"]) + self.assertDictEqual(type_mapping, trajectory_info["typeMapping"]) + self.assertListEqual(bundle_data, simularium["spatialData"]["bundleData"]) + + @mock.patch.object( + sys.modules["arcade_collection.convert.convert_to_simularium_shapes"], + "random", + return_value=mock.Mock(), + ) + def test_convert_to_simularium_shapes_patch_with_graph(self, random_mock): + series_key = "SERIES_KEY" + + contents = { + f"{series_key}.GRAPH.json": { + "timepoints": [ + { + "time": 0.0, + "graph": [ + [ + [1 * sqrt(3), 2, 3, 0, 0], + [4 * sqrt(3), 5, 6, 0, 0], + [-2, 7, 0, 0, 0, 0, 1], + ], + [ + [8 * sqrt(3), 9, 10, 0, 0], + 
[11 * sqrt(3), 12, 13, 0, 0], + [-1, 14, 0, 0, 0, 0, 1], + ], + ], + }, + { + "time": 5.0, + "graph": [ + [ + [15 * sqrt(3), 16, 17, 0, 0], + [18 * sqrt(3), 19, 20, 0, 0], + [0, 21, 0, 0, 0, 0, 1], + ], + [ + [22 * sqrt(3), 23, 24, 0, 0], + [25 * sqrt(3), 26, 27, 0, 0], + [1, 28, 0, 0, 0, 0, 1], + ], + ], + }, + { + "time": 10.0, + "graph": [ + [ + [29 * sqrt(3), 30, 31, 0, 0], + [32 * sqrt(3), 33, 34, 0, 0], + [2, 35, 0, 0, 0, 0, 1], + ], + [ + [36 * sqrt(3), 37, 38, 0, 0], + [39 * sqrt(3), 40, 41, 0, 0], + [2, 42, 0, 0, 0, 0, np.nan], + ], + ], + }, + ] + } + } + + simulation_type = "patch" + data_tars = {"graph": build_tar_instance(contents)} + frame_spec = (0, 11, 5) + box = (2, 0, 0) + ds = (2, 3, 4) + dt = 10 + colors = { + EDGE_TYPES[0]: "#ff0000", + EDGE_TYPES[1]: "#00ff00", + EDGE_TYPES[2]: "#0000ff", + EDGE_TYPES[3]: "#ff00ff", + EDGE_TYPES[4]: "#00ffff", + EDGE_TYPES[5]: "#ffff00", + } + resolution = 0 + jitter = 0 + + random_mock.randint.side_effect = [0, 1, 2, 3, 4] + + start_time, end_time, time_interval = frame_spec + total_steps = len(np.arange(start_time, end_time, time_interval)) + time_step_size = dt * time_interval + _, length, width = calculate_patch_size(box[0], box[1]) + size = {"x": length * ds[0], "y": width * ds[1], "z": box[2] * ds[2]} + type_mapping = { + str(i): { + "name": f"VASCULATURE#{edge_type}", + "geometry": {"displayType": "FIBER", "color": colors[edge_type]}, + } + for i, edge_type in enumerate(EDGE_TYPES) + } + + def make_bundle_data(object_id, display_index, radius, length, width, point1, point2): + rotation = (0, 0, 0) + subpoints = 6 + x1, y1, z1 = point1 + x2, y2, z2 = point2 + + return [ + VIZ_TYPE.FIBER, + object_id, + display_index, + -length / 2 * ds[0], + width / 2 * ds[1], + 0, + *rotation, + radius, + subpoints, + x1 * ds[0], + -y1 * ds[1], + z1 * ds[2], + x2 * ds[0], + -y2 * ds[1], + z2 * ds[2], + ] + + bundle_data = [ + { + "frameNumber": 0, + "time": 0.0 * time_step_size, + "data": [ + *make_bundle_data(0, 0, 7, 
length, width, (1, 2, 3), (4, 5, 6)), + *make_bundle_data(1, 1, 14, length, width, (8, 9, 10), (11, 12, 13)), + ], + }, + { + "frameNumber": 1, + "time": 1.0 * time_step_size, + "data": [ + *make_bundle_data(0, 2, 21, length, width, (15, 16, 17), (18, 19, 20)), + *make_bundle_data(1, 3, 28, length, width, (22, 23, 24), (25, 26, 27)), + ], + }, + { + "frameNumber": 2, + "time": 2.0 * time_step_size, + "data": [ + *make_bundle_data(0, 4, 35, length, width, (29, 30, 31), (32, 33, 34)), + *make_bundle_data(1, 5, 42, length, width, (36, 37, 38), (39, 40, 41)), + ], + }, + ] + + simularium = json.loads( + convert_to_simularium_shapes( + series_key, + simulation_type, + data_tars, + frame_spec, + box, + ds, + dt, + colors, + resolution, + jitter, + ) + ) + + trajectory_info = simularium["trajectoryInfo"] + + self.assertEqual(total_steps, trajectory_info["totalSteps"]) + self.assertEqual(time_step_size, trajectory_info["timeStepSize"]) + self.assertDictEqual(size, trajectory_info["size"]) + self.assertDictEqual(type_mapping, trajectory_info["typeMapping"]) + self.assertListEqual(bundle_data, simularium["spatialData"]["bundleData"]) + + def test_convert_to_simularium_shapes_potts_invalid_data(self): + simulation_type = "potts" + simularium = convert_to_simularium_shapes( + "", simulation_type, {"invalid": None}, (0, 0, 0), (0, 0, 0), (0, 0, 0), 0, {} + ) + self.assertEqual("", simularium) + + def test_convert_to_simularium_shapes_potts_resolution_zero(self): + series_key = "SERIES_KEY" + + cells_contents = { + f"{series_key}_000000.CELLS.json": [{"id": 1, "phase": "A"}], + f"{series_key}_000005.CELLS.json": [{"id": 2, "phase": "B"}, {"id": 3, "phase": "A"}], + f"{series_key}_000010.CELLS.json": [{"id": 4, "phase": "B"}], + } + + locs_contents = { + f"{series_key}_000000.LOCATIONS.json": [ + { + "id": 1, + "location": [ + {"region": "DEFAULT", "voxels": [[1, 2, 3], [2, 2, 3], [5, 4, 0]]}, + {"region": "REGION_A", "voxels": [[1, 3, 3], [1, 4, 6]]}, + ], + } + ], + 
f"{series_key}_000005.LOCATIONS.json": [ + { + "id": 2, + "location": [ + {"region": "DEFAULT", "voxels": [[2, 4, 6], [4, 4, 6], [10, 8, 0]]}, + {"region": "REGION_B", "voxels": [[2, 6, 6], [2, 8, 12]]}, + ], + }, + { + "id": 3, + "location": [ + {"region": "DEFAULT", "voxels": [[3, 6, 9], [6, 6, 9], [15, 12, 0]]}, + {"region": "REGION_C", "voxels": [[3, 9, 9], [3, 12, 18]]}, + ], + }, + ], + f"{series_key}_000010.LOCATIONS.json": [ + { + "id": 4, + "location": [ + {"region": "DEFAULT", "voxels": [[4, 8, 12], [8, 8, 12], [20, 16, 0]]}, + {"region": "REGION_D", "voxels": [[4, 12, 12], [4, 16, 24]]}, + ], + }, + ], + } + + simulation_type = "potts" + data_tars = { + "cells": build_tar_instance(cells_contents), + "locations": build_tar_instance(locs_contents), + } + frame_spec = (0, 11, 5) + box = (10, 20, 30) + ds = (2, 3, 4) + dt = 10 + colors = {"A": "#ff0000", "B": "#00ff00"} + resolution = 0 + jitter = 0 + + length, width, height = box + start_time, end_time, time_interval = frame_spec + total_steps = len(np.arange(start_time, end_time, time_interval)) + time_step_size = dt * time_interval + size = {"x": length * ds[0], "y": width * ds[1], "z": height * ds[2]} + type_mapping = { + str(i): { + "name": f"{region}#{phase}#{index}", + "geometry": {"displayType": "SPHERE", "color": colors[phase]}, + } + for i, (region, phase, index) in enumerate( + [ + ("DEFAULT", "A", 1), + ("REGION_A", "A", 1), + ("DEFAULT", "B", 2), + ("REGION_B", "B", 2), + ("DEFAULT", "A", 3), + ("REGION_C", "A", 3), + ("DEFAULT", "B", 4), + ("REGION_D", "B", 4), + ] + ) + } + + def make_bundle_data(object_id, display_index, center, voxels): + cx, cy, cz = center + rotation = (0, 0, 0) + subpoints = 0 + + return [ + VIZ_TYPE.DEFAULT, + object_id, + display_index, + (cx - length / 2.0) * ds[0], + (width / 2.0 - cy) * ds[1], + (cz - height / 2.0) * ds[2], + *rotation, + approximate_radius_from_voxels(voxels), + subpoints, + ] + + bundle_data = [ + { + "frameNumber": 0, + "time": 0.0 * 
time_step_size, + "data": [ + *make_bundle_data(0, 0, (2, 3, 3), 5), + *make_bundle_data(1, 1, (1, 3.5, 4.5), 2), + ], + }, + { + "frameNumber": 1, + "time": 1.0 * time_step_size, + "data": [ + *make_bundle_data(0, 2, (4, 6, 6), 5), + *make_bundle_data(1, 3, (2, 7, 9), 2), + *make_bundle_data(2, 4, (6, 9, 9), 5), + *make_bundle_data(3, 5, (3, 10.5, 13.5), 2), + ], + }, + { + "frameNumber": 2, + "time": 2.0 * time_step_size, + "data": [ + *make_bundle_data(0, 6, (8, 12, 12), 5), + *make_bundle_data(1, 7, (4, 14, 18), 2), + ], + }, + ] + + simularium = json.loads( + convert_to_simularium_shapes( + series_key, + simulation_type, + data_tars, + frame_spec, + box, + ds, + dt, + colors, + resolution, + jitter, + ) + ) + + trajectory_info = simularium["trajectoryInfo"] + + self.assertEqual(total_steps, trajectory_info["totalSteps"]) + self.assertEqual(time_step_size, trajectory_info["timeStepSize"]) + self.assertDictEqual(size, trajectory_info["size"]) + self.assertDictEqual(type_mapping, trajectory_info["typeMapping"]) + self.assertListEqual(bundle_data, simularium["spatialData"]["bundleData"]) + + def test_convert_to_simularium_shapes_potts_resolution_one(self): + series_key = "SERIES_KEY" + + hollow_cube_voxels = [[x, y, z] for x in range(3) for y in range(3) for z in range(3)] + full_cube_voxels = [[0, 0, 0]] + + expected_hollow_cube_voxels = [ + [x, y, z] + for x in range(3) + for y in range(3) + for z in range(3) + if [x, y, z] != [1, 1, 1] + ] + expected_full_cube_voxels = [[0, 0, 0]] + + cells_contents = { + f"{series_key}_000000.CELLS.json": [{"id": 1, "phase": "A"}], + f"{series_key}_000005.CELLS.json": [{"id": 2, "phase": "B"}], + } + + locs_contents = { + f"{series_key}_000000.LOCATIONS.json": [ + { + "id": 1, + "location": [ + { + "region": "UNDEFINED", + "voxels": hollow_cube_voxels, + } + ], + } + ], + f"{series_key}_000005.LOCATIONS.json": [ + { + "id": 2, + "location": [ + { + "region": "UNDEFINED", + "voxels": full_cube_voxels, + } + ], + } + ], + } + + 
simulation_type = "potts" + data_tars = { + "cells": build_tar_instance(cells_contents), + "locations": build_tar_instance(locs_contents), + } + frame_spec = (0, 6, 5) + box = (10, 20, 30) + ds = (2, 3, 4) + dt = 10 + colors = {"A": "#ff0000", "B": "#00ff00"} + resolution = 1 + jitter = 0 + + length, width, height = box + start_time, end_time, time_interval = frame_spec + total_steps = len(np.arange(start_time, end_time, time_interval)) + time_step_size = dt * time_interval + size = {"x": length * ds[0], "y": width * ds[1], "z": height * ds[2]} + type_mapping = { + "0": { + "name": "UNDEFINED#A#1", + "geometry": {"displayType": "SPHERE", "color": colors["A"]}, + }, + "1": { + "name": "UNDEFINED#B#2", + "geometry": {"displayType": "SPHERE", "color": colors["B"]}, + }, + } + + def make_bundle_data(object_id, display_index, voxels): + bundle_data = [] + rotation = (0, 0, 0) + subpoints = 0 + + for i, (x, y, z) in enumerate(voxels): + bundle_data.extend( + [ + VIZ_TYPE.DEFAULT, + object_id + i, + display_index, + (x - length / 2.0) * ds[0], + (width / 2.0 - y) * ds[1], + (z - height / 2.0) * ds[2], + *rotation, + resolution / 2, + subpoints, + ] + ) + + return bundle_data + + bundle_data = [ + { + "frameNumber": 0, + "time": 0.0 * time_step_size, + "data": [ + *make_bundle_data(0, 0, expected_hollow_cube_voxels), + ], + }, + { + "frameNumber": 1, + "time": 1.0 * time_step_size, + "data": [ + *make_bundle_data(0, 1, expected_full_cube_voxels), + ], + }, + ] + + simularium = json.loads( + convert_to_simularium_shapes( + series_key, + simulation_type, + data_tars, + frame_spec, + box, + ds, + dt, + colors, + resolution, + jitter, + ) + ) + + trajectory_info = simularium["trajectoryInfo"] + + self.assertEqual(total_steps, trajectory_info["totalSteps"]) + self.assertEqual(time_step_size, trajectory_info["timeStepSize"]) + self.assertDictEqual(size, trajectory_info["size"]) + self.assertDictEqual(type_mapping, trajectory_info["typeMapping"]) + 
self.assertListEqual(bundle_data, simularium["spatialData"]["bundleData"]) + + def test_convert_to_simularium_shapes_potts_resolution_two(self): + series_key = "SERIES_KEY" + + hollow_cube_voxels = [[x, y, z] for x in range(6) for y in range(6) for z in range(6)] + full_cube_voxels = [[x, y, z] for x in range(4) for y in range(4) for z in range(4)] + + expected_hollow_cube_voxels = [ + [2 * x + 0.5, 2 * y + 0.5, 2 * z + 0.5] + for x in range(3) + for y in range(3) + for z in range(3) + if [x, y, z] != [1, 1, 1] + ] + expected_full_cube_voxels = [ + [2 * x + 0.5, 2 * y + 0.5, 2 * z + 0.5] + for x in range(2) + for y in range(2) + for z in range(2) + ] + + cells_contents = { + f"{series_key}_000000.CELLS.json": [{"id": 1, "phase": "A"}], + f"{series_key}_000005.CELLS.json": [{"id": 2, "phase": "B"}], + } + + locs_contents = { + f"{series_key}_000000.LOCATIONS.json": [ + { + "id": 1, + "location": [ + { + "region": "UNDEFINED", + "voxels": hollow_cube_voxels, + } + ], + } + ], + f"{series_key}_000005.LOCATIONS.json": [ + { + "id": 2, + "location": [ + { + "region": "UNDEFINED", + "voxels": full_cube_voxels, + } + ], + } + ], + } + + simulation_type = "potts" + data_tars = { + "cells": build_tar_instance(cells_contents), + "locations": build_tar_instance(locs_contents), + } + frame_spec = (0, 6, 5) + box = (10, 20, 30) + ds = (2, 3, 4) + dt = 10 + colors = {"A": "#ff0000", "B": "#00ff00"} + resolution = 2 + jitter = 0 + + length, width, height = box + start_time, end_time, time_interval = frame_spec + total_steps = len(np.arange(start_time, end_time, time_interval)) + time_step_size = dt * time_interval + size = {"x": length * ds[0], "y": width * ds[1], "z": height * ds[2]} + type_mapping = { + "0": { + "name": "UNDEFINED#A#1", + "geometry": {"displayType": "SPHERE", "color": colors["A"]}, + }, + "1": { + "name": "UNDEFINED#B#2", + "geometry": {"displayType": "SPHERE", "color": colors["B"]}, + }, + } + + def make_bundle_data(object_id, display_index, voxels): + 
bundle_data = [] + rotation = (0, 0, 0) + subpoints = 0 + + for i, (x, y, z) in enumerate(voxels): + bundle_data.extend( + [ + VIZ_TYPE.DEFAULT, + object_id + i, + display_index, + (x - length / 2.0) * ds[0], + (width / 2.0 - y) * ds[1], + (z - height / 2.0) * ds[2], + *rotation, + resolution / 2, + subpoints, + ] + ) + + return bundle_data + + bundle_data = [ + { + "frameNumber": 0, + "time": 0.0 * time_step_size, + "data": [ + *make_bundle_data(0, 0, expected_hollow_cube_voxels), + ], + }, + { + "frameNumber": 1, + "time": 1.0 * time_step_size, + "data": [ + *make_bundle_data(0, 1, expected_full_cube_voxels), + ], + }, + ] + + simularium = json.loads( + convert_to_simularium_shapes( + series_key, + simulation_type, + data_tars, + frame_spec, + box, + ds, + dt, + colors, + resolution, + jitter, + ) + ) + + trajectory_info = simularium["trajectoryInfo"] + + self.assertEqual(total_steps, trajectory_info["totalSteps"]) + self.assertEqual(time_step_size, trajectory_info["timeStepSize"]) + self.assertDictEqual(size, trajectory_info["size"]) + self.assertDictEqual(type_mapping, trajectory_info["typeMapping"]) + self.assertListEqual(bundle_data, simularium["spatialData"]["bundleData"]) + + def test_convert_to_simularium_shapes_invalid_type_throws_exception(self): with self.assertRaises(ValueError): simulation_type = "invalid_type" - convert_to_simularium_shapes("", simulation_type, {}, (0, 0, 0), (0, 0, 0), 0, 0, 0, {}) + convert_to_simularium_shapes( + "", simulation_type, {}, (0, 0, 0), (0, 0, 0), (0, 0, 0), 0, {} + ) if __name__ == "__main__": diff --git a/tests/arcade_collection/convert/test_convert_to_tfe.py b/tests/arcade_collection/convert/test_convert_to_tfe.py new file mode 100644 index 0000000..870ffb5 --- /dev/null +++ b/tests/arcade_collection/convert/test_convert_to_tfe.py @@ -0,0 +1,72 @@ +import unittest + +import pandas as pd + +from arcade_collection.convert.convert_to_tfe import convert_to_tfe + + +class TestConvertToTFE(unittest.TestCase): + def 
test_convert_to_tfe(self): + all_data = pd.DataFrame( + { + "TICK": [0, 0, 0, 0, 5, 5, 5, 10, 10, 10, 15, 15], + "ID": [1, 2, 4, 5, 1, 4, 5, 1, 2, 4, 1, 2], + "time": [0, 0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1, 1.5, 1.5], + "feature_a": [10, 12, 14, 16, 10, 12, 15, 11, 12, 15, 16, 10], + "feature_b": ["A", "B", "A", "B", "A", "B", "C", "C", "C", "A", "B", "C"], + "feature_c": [1.0, 1.2, 1.4, 1.6, 1.0, 1.2, 1.5, 1.1, 1.2, 1.5, 1.6, 1.0], + } + ) + features = [ + ("feature_a", "Feature A name", "continuous"), + ("feature_b", "Feature B name", "categorical"), + ("feature_c", "Feature C name", "discrete"), + ] + frame_spec = (0, 16, 10) + + expected_manifest = { + "frames": ["frames/frame_0.png", "frames/frame_1.png"], + "features": [ + { + "key": "feature_a", + "name": "Feature A name", + "data": "features/feature_a.json", + "type": "continuous", + }, + { + "key": "feature_b", + "name": "Feature B name", + "data": "features/feature_b.json", + "type": "categorical", + "categories": ["A", "B", "C"], + }, + { + "key": "feature_c", + "name": "Feature C name", + "data": "features/feature_c.json", + "type": "discrete", + }, + ], + "tracks": "tracks.json", + "times": "times.json", + } + + expected_tracks = {"data": [0, 1, 2, 4, 5, 1, 2, 4]} + expected_times = {"data": [0, 0, 0, 0, 0, 1, 1, 1]} + + expected_feature_a = {"data": [0, 10, 12, 14, 16, 11, 12, 15], "min": 10, "max": 16} + expected_feature_b = {"data": [0, 0, 1, 0, 1, 2, 2, 0], "min": 0, "max": 2} + expected_feature_c = {"data": [0, 1, 1.2, 1.4, 1.6, 1.1, 1.2, 1.5], "min": 1, "max": 1.6} + + tfe = convert_to_tfe(all_data, features, frame_spec) + + self.assertDictEqual(expected_manifest, tfe["manifest"]) + self.assertDictEqual(expected_tracks, tfe["tracks"]) + self.assertDictEqual(expected_times, tfe["times"]) + self.assertDictEqual(expected_feature_a, tfe["features"]["feature_a"]) + self.assertDictEqual(expected_feature_b, tfe["features"]["feature_b"]) + self.assertDictEqual(expected_feature_c, 
tfe["features"]["feature_c"]) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/arcade_collection/convert/utilities.py b/tests/arcade_collection/convert/utilities.py new file mode 100644 index 0000000..ad6123c --- /dev/null +++ b/tests/arcade_collection/convert/utilities.py @@ -0,0 +1,16 @@ +import io +import json +import tarfile + + +def build_tar_instance(contents): + buffer = io.BytesIO() + + with tarfile.open(fileobj=buffer, mode="w") as tar: + for file_key, content in contents.items(): + byte_array = json.dumps(content).encode("utf-8") + info = tarfile.TarInfo(file_key) + info.size = len(byte_array) + tar.addfile(info, io.BytesIO(byte_array)) + + return tarfile.open(fileobj=io.BytesIO(buffer.getvalue()), mode="r")