From 1db63dd3db5265ddb36e7fb64235ebeb7caad087 Mon Sep 17 00:00:00 2001
From: Ma Nan
Date: Thu, 4 Jul 2024 13:05:46 +0800
Subject: [PATCH] feat: rewrite resize, add more nodes

---
 .github/workflows/upload-assets.yml       |    9 +-
 README.md                                 |   18 +-
 bioxelnodes/__init__.py                   |    4 +-
 .../assets/Nodes/BioxelNodes_4.1.blend    |    4 +-
 bioxelnodes/customnodes/menus.py          |   12 +-
 bioxelnodes/io.py                         |  437 ++--
 bioxelnodes/menus.py                      |  109 +-
 bioxelnodes/misc.py                       |   14 +-
 bioxelnodes/nodes.py                      |  173 +-
 bioxelnodes/operators.py                  |  758 +++++--
 bioxelnodes/scipy/__init__.py             |    2 +
 bioxelnodes/scipy/_filters.py             | 1849 +++++++++++++++++
 bioxelnodes/scipy/_interpolation.py       |  313 +++
 .../scipy/_nd_image.cp311-win_amd64.dll.a |  Bin 0 -> 1568 bytes
 bioxelnodes/scipy/_ni_support.py          |  119 ++
 bioxelnodes/scipy/_utils.py               |   10 +
 bioxelnodes/skimage/__init__.py           |    1 +
 bioxelnodes/skimage/_utils.py             |   37 +
 bioxelnodes/skimage/_warps.py             |  318 +++
 bioxelnodes/skimage/dtype.py              |  600 ++++++
 bioxelnodes/utils.py                      |   30 +-
 extension/__init__.py                     |    2 +-
 extension/blender_manifest.toml           |   63 +-
 poetry.lock                               |  264 ++-
 pyproject.toml                            |    3 +-
 25 files changed, 4653 insertions(+), 496 deletions(-)
 create mode 100644 bioxelnodes/scipy/__init__.py
 create mode 100644 bioxelnodes/scipy/_filters.py
 create mode 100644 bioxelnodes/scipy/_interpolation.py
 create mode 100644 bioxelnodes/scipy/_nd_image.cp311-win_amd64.dll.a
 create mode 100644 bioxelnodes/scipy/_ni_support.py
 create mode 100644 bioxelnodes/scipy/_utils.py
 create mode 100644 bioxelnodes/skimage/__init__.py
 create mode 100644 bioxelnodes/skimage/_utils.py
 create mode 100644 bioxelnodes/skimage/_warps.py
 create mode 100644 bioxelnodes/skimage/dtype.py

diff --git a/.github/workflows/upload-assets.yml b/.github/workflows/upload-assets.yml
index 02db20a..fa506cb 100644
--- a/.github/workflows/upload-assets.yml
+++ b/.github/workflows/upload-assets.yml
@@ -58,7 +58,7 @@ jobs:
       - name: Checkout Code
         uses: actions/checkout@v4
         with:
-          lfs: 'true'
+          lfs: "true"
       - name: Zip Add-on
         run: |
           zip -r package.zip bioxelnodes
@@ -72,7 +72,7 @@ jobs:
          asset_path: ./package.zip
          asset_name: BioxelNodes_Addon_${{ needs.draft_release.outputs.version }}.zip
          asset_content_type: application/zip
-
+
  upload_blender_extension:
    name: Upload Blender Extension
    needs: draft_release
      - name: Checkout Code
        uses: actions/checkout@v4
        with:
-          lfs: 'true'
+          lfs: "true"
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.11
      - name: Zip Extension
        run: |
-          pip download SimpleITK --dest bioxelnodes/wheels --only-binary=:all: --python-version=3.11 --platform=macosx_11_0_arm64
-          pip download SimpleITK --dest bioxelnodes/wheels --only-binary=:all: --python-version=3.11 --platform=win_amd64
+          pip download SimpleITK==2.3.1 --dest bioxelnodes/wheels --only-binary=:all: --python-version=3.11 --platform=win_amd64
          rm -r bioxelnodes/externalpackage
          cp extension/__init__.py bioxelnodes/__init__.py
          cp extension/preferences.py bioxelnodes/preferences.py

diff --git a/README.md b/README.md
index 8594084..af32c89 100644
--- a/README.md
+++ b/README.md
@@ -1,23 +1,23 @@
 [中文文档](https://uj6xfhbzp0.feishu.cn/wiki/Qx3VwHuNPimeI8kr6nDcvl1DnHf?from=from_copylink)

-# Bioxel Nodes
+# 🦖 Bioxel Nodes

 ![Static Badge](https://img.shields.io/badge/Blender-orange?style=for-the-badge&logo=blender&logoColor=white)
 ![GitHub License](https://img.shields.io/github/license/OmooLab/BioxelNodes?style=for-the-badge)
 ![GitHub Release](https://img.shields.io/github/v/release/OmooLab/BioxelNodes?style=for-the-badge)
 ![GitHub Repo stars](https://img.shields.io/github/stars/OmooLab/BioxelNodes?style=for-the-badge)

-Bioxel Nodes is a Blender add-on for scientific volumetric data visualization. It using Blender's powerful Geometry Nodes | Cycles to process and render volumetric data.
+Bioxel Nodes is a Blender extension for scientific volumetric data visualization. It uses Blender's powerful Geometry Nodes and Cycles to process and render volumetric data. You are free to share your blender file with anyone who does not have this extension installed, since most of the processing is done by Blender's native nodes.

 ## About

-Before us, there have been many tutorials and add-ons for importing volumetric data into Blender. However, we found that there were many details that were not addressed in place, some scientific facts were ignored, and the volume rendering was not pretty enough. With Bioxel Nodes, you can easily import the volumetric data into Blender, and more importantly, it can quickly make a beautiful realistic rendering of it.
+Before us, there have been many tutorials and extensions for importing volumetric data into Blender. However, we found that many scientific issues were not properly addressed, and the volume render results fell short. With Bioxel Nodes, you can easily import volumetric data in any supported format into Blender and, more importantly, quickly produce a beautiful, realistic volume rendering.

 Below are some examples with Bioxel Nodes. Thanks to Cycles Render, the volumetric data can be rendered with great detail:

-![cover](docs/assets/cover.png)
+![cover](https://omoolab.github.io/BioxelNodes/latest/assets/cover.png)

-The "Bioxel" in "Bioxel Nodes", is a combination of the words "Bio-" and "Voxel". Bioxel is a voxel that stores biological data. We are developing a toolkit around Bioxel for better biological data visualization. but before its release, we made this Blender version of bioxels toolkit first, in order to let more people to have fun with volumetric data. [Getting Started](https://omoolab.github.io/BioxelNodes/latest/getting-started)
+So how do you use this extension? Please check [Getting Started](https://omoolab.github.io/BioxelNodes/latest/getting-started).

 ## Supported Format

 - Sections cannot be generated (will be supported soon)
 - Time sequence volume not supported (will be supported soon)

-## To Upgrade Add-on
+## Upgrade from 0.1.x to 0.2.x

-To upgrade from an older version of the add-on to the latest, you need to do the following:
+You need to do the following:

 1. Remove the old version of Bioxel Nodes at Preferences > Add-ons
 2. Add the new version and restart Blender.

-It is not support editing the same blender file across add-on versions. In order to make sure that the previous file works properly. You need to save the staged data before upgrading ( read the last section of [Getting Started](https://omoolab.github.io/BioxelNodes/latest/getting-started/#share-your-file) ).
+Editing the same blender file across extension versions is not supported. To make sure that a previous file keeps working, save the staged data before upgrading (read the last section of [Getting Started](https://omoolab.github.io/BioxelNodes/latest/getting-started/#share-your-file)).

-But even then, there is still no guarantee that the new version of the add-on will work on the old blender file. 
Therefore, it is highly recommended to open a new blender file to start the creating, not based on the old one. +But even then, there is still no guarantee that the new version of the extension will work on the old blender file. Therefore, it is highly recommended to open a new blender file to start the creating, not based on the old one. Alternatively, objects from the old file that have nothing to do with Bioxel Nodes could be append to the new blender file. diff --git a/bioxelnodes/__init__.py b/bioxelnodes/__init__.py index 7c982eb..4185846 100644 --- a/bioxelnodes/__init__.py +++ b/bioxelnodes/__init__.py @@ -9,7 +9,7 @@ "author": "Ma Nan", "description": "", "blender": (4, 0, 0), - "version": (0, 2, 0), + "version": (0, 2, 3), "location": "File -> Import", "warning": "", "category": "Node" @@ -22,7 +22,7 @@ def register(): auto_load.register() menus.add() bpy.types.Scene.bioxel_layer_dir = bpy.props.StringProperty( - name="Bioxel Layers Directory", + name="Layer Directory", subtype='DIR_PATH', default="//" ) diff --git a/bioxelnodes/assets/Nodes/BioxelNodes_4.1.blend b/bioxelnodes/assets/Nodes/BioxelNodes_4.1.blend index 992f1f8..0c739fd 100644 --- a/bioxelnodes/assets/Nodes/BioxelNodes_4.1.blend +++ b/bioxelnodes/assets/Nodes/BioxelNodes_4.1.blend @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9dc863260e46ad32b0101f9e082a4c7162ccf1e1df7a8ea678d02961d9866d99 -size 4702346 +oid sha256:3b8abe6abfbaa3ad7db74dfcd038006a29d7fa9773d37ff4b4dd2201d3d59e9a +size 6164162 diff --git a/bioxelnodes/customnodes/menus.py b/bioxelnodes/customnodes/menus.py index 7b129d1..9ed668e 100644 --- a/bioxelnodes/customnodes/menus.py +++ b/bioxelnodes/customnodes/menus.py @@ -6,7 +6,7 @@ class SaveAllNodes(bpy.types.Operator): bl_idname = "customnodes.save_all_nodes" - bl_label = "Save All Custom Nodes" + bl_label = "Save All Nodes" bl_description = "Save All Custom Nodes to Directory." bl_options = {'UNDO'} @@ -21,9 +21,9 @@ def execute(self, context): for file in files: file_name = Path(file).name # "//" - custom_nodes_dir = bpy.path.abspath(context.scene.custom_nodes_dir) + customnodes_node_dir = bpy.path.abspath(context.scene.customnodes_node_dir) - output_path: Path = Path(custom_nodes_dir, file_name).resolve() + output_path: Path = Path(customnodes_node_dir, file_name).resolve() source_path: Path = Path(file).resolve() if output_path != source_path: @@ -51,7 +51,7 @@ class CUSTOMNODES_PT_CustomNodes(bpy.types.Panel): def draw(self, context): layout = self.layout scene = context.scene - layout.prop(scene, 'custom_nodes_dir') + layout.prop(scene, 'customnodes_node_dir') layout.operator(SaveAllNodes.bl_idname) @@ -181,8 +181,8 @@ def register(self): bpy.utils.register_class(cls) bpy.types.NODE_MT_add.append(self.add_node_menu) - bpy.types.Scene.custom_nodes_dir = bpy.props.StringProperty( - name="Nodes Directory", + bpy.types.Scene.customnodes_node_dir = bpy.props.StringProperty( + name="Node Directory", subtype='DIR_PATH', default="//" ) diff --git a/bioxelnodes/io.py b/bioxelnodes/io.py index 98c4cde..816fdce 100644 --- a/bioxelnodes/io.py +++ b/bioxelnodes/io.py @@ -1,3 +1,4 @@ +import math import bpy import shutil from bpy_extras.io_utils import axis_conversion @@ -8,17 +9,18 @@ import mathutils import random +from . 
import skimage as ski from .nodes import custom_nodes from .props import BIOXELNODES_Series -from .utils import (calc_bbox_verts, get_all_layers, get_container, get_layer, get_text_index_str, - get_node_by_type, hide_in_ray, lock_transform, show_message) +from .utils import (calc_bbox_verts, get_all_layers, get_container_from_selection, get_layer, get_text_index_str, + get_node_by_type, hide_in_ray, lock_transform, save_vdb, show_message) try: import SimpleITK as sitk except: ... -SUPPORT_EXTS = ['.dcm', '.DCM', '.DICOM', +SUPPORT_EXTS = ['', '.dcm', '.DCM', '.DICOM', '.bmp', '.BMP', '.PIC', '.pic', '.gipl', '.gipl.gz', @@ -40,9 +42,9 @@ '.tif', '.TIF', '.tiff', '.TIFF', '.png', '.PNG'] -DICOM_EXTS = ['.dcm', '.DCM', '.DICOM'] +DICOM_EXTS = ['', '.dcm', '.DCM', '.DICOM'] -FH_EXTS = ['.dcm', '.DCM', '.DICOM', +FH_EXTS = ['', '.dcm', '.DCM', '.DICOM', '.gipl', '.gipl.gz', '.mnc', '.MNC', '.mrc', '.rec', @@ -64,6 +66,14 @@ def get_layer_shape(bioxel_size: float, orig_shape: tuple, orig_spacing: tuple): shape[2] if shape[2] > 0 else 1) +def get_layer_size(shape: tuple, bioxel_size: float, scale: float = 1.0): + size = (float(shape[0] * bioxel_size * scale), + float(shape[1] * bioxel_size * scale), + float(shape[2] * bioxel_size * scale)) + + return size + + def get_ext(filepath: str) -> str: file_path = Path(filepath) if file_path.name.endswith(".nii.gz"): @@ -90,7 +100,7 @@ def get_sequence_index(filepath: str) -> int: return int(index) if index else 0 -def collect_image_sequence(filepath: str): +def collect_sequence(filepath: str): file_path = Path(filepath).resolve() files = list(file_path.parent.iterdir()) @@ -103,7 +113,7 @@ def collect_image_sequence(filepath: str): return sequence -def read_image(filepath: str, series_id=""): +def parse_volume_data(filepath: str, series_id=""): ext = get_ext(filepath) if ext in DICOM_EXTS: @@ -114,37 +124,42 @@ def read_image(filepath: str, series_id=""): series_files = reader.GetGDCMSeriesFileNames( str(dir_path), series_id) reader.SetFileNames(series_files) - image = reader.Execute() + itk_volume = reader.Execute() name = dir_path.name elif ext in SEQUENCE_EXTS: - image = sitk.ReadImage(filepath) - if image.GetDimension() == 2: - sequence = collect_image_sequence(filepath) - image = sitk.ReadImage(sequence) + itk_volume = sitk.ReadImage(filepath) + if itk_volume.GetDimension() == 2: + sequence = collect_sequence(filepath) + itk_volume = sitk.ReadImage(sequence) name = get_sequence_name(filepath) else: - image = sitk.ReadImage(filepath) + itk_volume = sitk.ReadImage(filepath) name = Path(filepath).name.removesuffix(ext) else: - image = sitk.ReadImage(filepath) + itk_volume = sitk.ReadImage(filepath) name = Path(filepath).name.removesuffix(ext) - return image, name + itk_volume = sitk.DICOMOrient(itk_volume, 'RAS') + volume = sitk.GetArrayFromImage(itk_volume) + if volume.ndim == 4: + volume = np.transpose(volume, (2, 1, 0, 3)) + else: + volume = np.transpose(volume) -def rgb2gray(image): - # Convert sRGB image to gray scale and rescale results to [0,255] - layers = [sitk.VectorIndexSelectionCast( - image, i, sitk.sitkFloat32) for i in range(image.GetNumberOfComponentsPerPixel())] - # linear mapping - I = (0.2126*layers[0] + 0.7152*layers[1] + 0.0722*layers[2]) - - return sitk.Cast(I, sitk.sitkFloat32) + meta = { + "name": name, + "shape": tuple(itk_volume.GetSize()), + "spacing": tuple(itk_volume.GetSpacing()), + "origin": tuple(itk_volume.GetOrigin()), + "direction": tuple(itk_volume.GetDirection()), + } + for key, value in meta.items(): + 
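        # Log each parsed field (name, shape, spacing, origin, direction)
        # to the console so the import can be sanity-checked before resampling.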
print(f"{key}: {value}") -def x2gray(image): - return sitk.VectorIndexSelectionCast(image, 0, sitk.sitkUInt16) + return volume, meta class ImportVolumeDataDialog(bpy.types.Operator): @@ -167,14 +182,6 @@ class ImportVolumeDataDialog(bpy.types.Operator): series_id: bpy.props.StringProperty() # type: ignore - resample_method: bpy.props.EnumProperty( - name="Resample Method", - default="linear", - items=[("linear", "Linear", ""), - ("nearest_neighbor", "Nearest Neighbor", ""), - ("gaussian", "Gaussian", "")] - ) # type: ignore - container: bpy.props.StringProperty() # type: ignore read_as: bpy.props.EnumProperty( @@ -203,7 +210,7 @@ class ImportVolumeDataDialog(bpy.types.Operator): scene_scale: bpy.props.FloatProperty( name="Scene Scale (Bioxel Unit pre Blender Unit)", - soft_min=0.001, soft_max=100.0, + soft_min=0.0001, soft_max=10.0, min=1e-6, max=1e6, default=0.01, ) # type: ignore @@ -213,95 +220,21 @@ class ImportVolumeDataDialog(bpy.types.Operator): default=True, ) # type: ignore - invert_scalar: bpy.props.BoolProperty( - name="Invert Scalar (Background value maybe higher than object)", - default=False, - ) # type: ignore - def execute(self, context): is_first_import = len(get_all_layers()) == 0 - image, name = read_image(self.filepath, self.series_id) - container_name = self.container_name or name + volume, meta = parse_volume_data(self.filepath) + container_name = self.container_name or meta['name'] or "Container" bioxel_size = self.bioxel_size orig_spacing = self.orig_spacing - image_spacing = image.GetSpacing() - image_shape = image.GetSize() layer_spacing = ( - image_spacing[0] / orig_spacing[0] * bioxel_size, - image_spacing[1] / orig_spacing[1] * bioxel_size, - image_spacing[2] / orig_spacing[2] * bioxel_size + meta['spacing'][0] / orig_spacing[0] * bioxel_size, + meta['spacing'][1] / orig_spacing[1] * bioxel_size, + meta['spacing'][2] / orig_spacing[2] * bioxel_size ) layer_shape = get_layer_shape( - bioxel_size, image_shape, orig_spacing) - - if self.read_as == "labels": - if "vector" in image.GetPixelIDTypeAsString(): - print("Conveting to Grayscale...") - image = x2gray(image) - else: - image = sitk.Cast(image, sitk.sitkUInt16) - default_value = 0 - elif self.read_as == "scalar": - if "vector" in image.GetPixelIDTypeAsString(): - print("Conveting to Grayscale...") - image = rgb2gray(image) - else: - image = sitk.Cast(image, sitk.sitkFloat32) - - stats = sitk.StatisticsImageFilter() - stats.Execute(image) - default_value = stats.GetMaximum() if self.invert_scalar else stats.GetMinimum() - - if self.resample_method == "linear": - interpolator = sitk.sitkLinear - elif self.resample_method == "nearest_neighbor": - interpolator = sitk.sitkNearestNeighbor - elif self.resample_method == "gaussian": - interpolator = sitk.sitkGaussian - - if self.read_as == "labels": - interpolator = sitk.sitkNearestNeighbor - - print(f"Resampling...") - image = sitk.Resample( - image1=image, - size=layer_shape, - transform=sitk.Transform(), - interpolator=interpolator, - outputOrigin=image.GetOrigin(), - outputSpacing=layer_spacing, - outputDirection=image.GetDirection(), - defaultPixelValue=default_value, - outputPixelType=image.GetPixelID(), - ) - - if self.do_orient: - print("Orienting to RAS...") - image = sitk.DICOMOrient(image, 'RAS') - - print("Oriented Origin:", image.GetOrigin()) - print("Oriented Direction:", image.GetDirection()) - - # return {'FINISHED'} - - # ITK indices, by convention, are [i,j,k] while NumPy indices are [k,j,i] - # https://www.slicer.org/wiki/Coordinate_systems - - # 
ITK Numpy 3D - # R (ight) i -> k -> x - # A (nterior) j -> j -> y - # S (uperior) k -> i -> z - array = sitk.GetArrayFromImage(image) - orig_dtype = str(array.dtype) - # print(f"Coverting Dtype from {orig_dtype} to float...") - # array = array.astype(float) - - if array.ndim == 4: - array = array[:, :, :, 0] - - array = np.transpose(array) + bioxel_size, meta['shape'], orig_spacing) # After sitk.DICOMOrient(), origin and direction will also orient base on LPS # so we need to convert them into RAS @@ -312,23 +245,22 @@ def execute(self, context): to_up='Y' ).to_4x4() - origin = image.GetOrigin() - direction = image.GetDirection() - mat_location = mathutils.Matrix.Translation( - mathutils.Vector(origin) + mathutils.Vector(meta['origin']) ) mat_rotation = mathutils.Matrix( - np.array(direction).reshape((3, 3)) + np.array(meta['direction']).reshape((3, 3)) ).to_4x4() mat_scale = mathutils.Matrix.Scale( bioxel_size, 4 ) - transfrom = mat_lps2ras @ mat_location @ mat_rotation @ mat_scale \ - if self.do_orient else mat_location @ mat_rotation @ mat_scale + # transfrom = mat_lps2ras @ mat_location @ mat_rotation @ mat_scale \ + # if self.do_orient else mat_location @ mat_rotation @ mat_scale + + transfrom = mat_lps2ras @ mat_location @ mat_rotation @ mat_scale # Wrapper a Container if not self.container: @@ -353,7 +285,7 @@ def execute(self, context): ) container = bpy.context.active_object - bbox_verts = calc_bbox_verts((0, 0, 0), array.shape) + bbox_verts = calc_bbox_verts((0, 0, 0), volume.shape) for index, vert in enumerate(container.data.vertices): bbox_transform = transfrom vert.co = bbox_transform @ mathutils.Vector(bbox_verts[index]) @@ -370,49 +302,24 @@ def execute(self, context): 'NodeGroupInput')[0] container_node_tree.links.remove(input_node.outputs[0].links[0]) - # bpy.ops.mesh.primitive_cube_add( - # enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1) - # ) - # frame = bpy.context.active_object - # bbox_verts = calc_bbox_verts((0, 0, 0), array.shape) - # for index, vert in enumerate(frame.data.vertices): - # # bbox_transform = mat_ras2blender @ mat_scene_scale @ transfrom - # bbox_transform = transfrom - # vert.co = bbox_transform @ mathutils.Vector(bbox_verts[index]) - - # frame.name = f"Frame_{container_name}" - # frame.data.name = f"Frame_{container_name}" - # lock_transform(frame) - # hide_in_ray(frame) - # frame.hide_select = True - # frame.hide_render = True - # frame.display_type = 'WIRE' - # frame.parent = container - else: container = bpy.data.objects[self.container] container_node_tree = container.modifiers[0].node_group preferences = context.preferences.addons[__package__].preferences - cache_dir = Path(preferences.cache_dir, 'VDBs') - cache_dir.mkdir(parents=True, exist_ok=True) - loc, rot, sca = transfrom.decompose() layer_origin = tuple(loc) layer_rotation = tuple(rot.to_euler()) - layer_shape = tuple(array.shape) - def create_layer(array, layer_name, layer_type="scalar"): + def create_layer(volume, layer_name, layer_type="scalar"): grid = vdb.FloatGrid() - array = array.copy().astype(float) - grid.copyFromArray(array) + volume = volume.copy().astype(np.float32) + grid.copyFromArray(volume) grid.transform = vdb.createLinearTransform(transfrom.transposed()) grid.name = layer_type - vdb_path = Path(cache_dir, f"{uuid4()}.vdb") - print(f"Storing the cache ({str(vdb_path)})...") - vdb.write(str(vdb_path), grids=[grid]) + vdb_path = save_vdb([grid], context) # Read VDB print(f"Loading the cache to Blender scene...") @@ -469,51 +376,66 @@ def 
create_layer(array, layer_name, layer_type="scalar"): return layer if self.read_as == "labels": - orig_max = int(np.max(array)) - orig_min = int(np.min(array)) + if volume.ndim == 4: + volume = np.amax(volume, -1) + volume = volume.astype(int) + orig_max = int(np.max(volume)) + orig_min = int(np.min(volume)) layer_name = self.layer_name or "Label" for i in range(orig_max): - label = array == np.full_like(array, i+1) - layer = create_layer(array=label, + label = volume == np.full_like(volume, i+1) + print(f"Resampling...") + label = ski.resize(label.astype(np.float32), + layer_shape, + anti_aliasing=True) + layer = create_layer(volume=label, layer_name=f"{container_name}_{layer_name}_{i+1}", layer_type="label") - output_node = get_node_by_type(container_node_tree.nodes, - 'NodeGroupOutput')[0] + # Deselect all nodes first + for node in container_node_tree.nodes: + if node.select: + node.select = False + mask_node = custom_nodes.add_node(container_node_tree.nodes, 'BioxelNodes_MaskByLabel') + mask_node.label = f"{layer_name}_{i+1}" mask_node.inputs[0].default_value = layer + + # Connect to output if no output linked + output_node = get_node_by_type(container_node_tree.nodes, + 'NodeGroupOutput')[0] if len(output_node.inputs[0].links) == 0: container_node_tree.links.new(mask_node.outputs[0], output_node.inputs[0]) else: - if self.invert_scalar: - array = -array + if volume.ndim == 4: + volume = np.amax(volume, -1) + # volume = skimage.color.rgb2gray(volume) - orig_max = float(np.max(array)) - orig_min = float(np.min(array)) - orig_median = float(np.median(array)) - orig_percentile80 = float(np.percentile(array, 80)) \ - if self.invert_scalar else float(np.percentile(array, 80)) + if volume.dtype.kind == 'u': + imax_in = np.iinfo(volume.dtype).max + volume = np.multiply(volume, 255.0 / imax_in, dtype=np.float32) + elif volume.dtype.kind == 'i': + volume = volume.astype(np.float32) - stats_table = [("Max", orig_max), - ("Min", orig_min), - ("Median", orig_median), - ("80%", orig_percentile80)] + print(f"Resampling...") + volume = ski.resize(volume, + layer_shape, + anti_aliasing=True) - print("Volume Data Stats:") - for stats in stats_table: - print("| {: >10} | {: >40} |".format(*stats)) + orig_max = float(np.max(volume)) + orig_min = float(np.min(volume)) scalar_offset = 0 if orig_min < 0: scalar_offset = -orig_min - array = array + np.full_like(array, scalar_offset) + volume = volume + np.full_like(volume, scalar_offset) layer_name = self.layer_name or "Scalar" - layer = create_layer(array=array, + layer = create_layer(volume=volume, layer_name=f"{container_name}_{layer_name}", layer_type="scalar") @@ -523,12 +445,18 @@ def create_layer(array, layer_name, layer_type="scalar"): to_layer_node.inputs['Scalar Max'].default_value = orig_max to_layer_node.inputs['Scalar Min'].default_value = orig_min - output_node = get_node_by_type(container_node_tree.nodes, - 'NodeGroupOutput')[0] + # Deselect all nodes first + for node in container_node_tree.nodes: + if node.select: + node.select = False mask_node = custom_nodes.add_node(container_node_tree.nodes, 'BioxelNodes_MaskByThreshold') + mask_node.label = layer_name mask_node.inputs[0].default_value = layer + # Connect to output if no output linked + output_node = get_node_by_type(container_node_tree.nodes, + 'NodeGroupOutput')[0] if len(output_node.inputs[0].links) == 0: container_node_tree.links.new(mask_node.outputs[0], output_node.inputs[0]) @@ -538,50 +466,64 @@ def create_layer(array, layer_name, layer_type="scalar"): # Change render setting for 
better result if preferences.do_change_render_setting and is_first_import: bpy.context.scene.render.engine = 'CYCLES' - bpy.context.scene.cycles.volume_bounces = 12 - bpy.context.scene.cycles.transparent_max_bounces = 16 - bpy.context.scene.cycles.volume_preview_step_rate = 10 + try: + bpy.context.scene.cycles.volume_bounces = 12 + bpy.context.scene.cycles.transparent_max_bounces = 16 + bpy.context.scene.cycles.volume_preview_step_rate = 10 + bpy.context.scene.cycles.volume_step_rate = 10 + bpy.context.scene.eevee.volumetric_tile_size = '2' + bpy.context.scene.eevee.volumetric_shadow_samples = 128 + bpy.context.scene.eevee.volumetric_samples = 256 + except: + pass self.report({"INFO"}, "Successfully Imported") return {'FINISHED'} def invoke(self, context, event): - context.window_manager.invoke_props_dialog(self, width=500) + if self.read_as == "labels": + volume_dtype = "Label" + elif self.read_as == "scalar": + volume_dtype = "Scalar" + title = f"Import '{volume_dtype}' Layer (Add to Container: {self.container})" \ + if self.container != "" else f"Import '{volume_dtype}' Layer (Init a Container)" + context.window_manager.invoke_props_dialog(self, + width=500, + title=title) return {'RUNNING_MODAL'} def draw(self, context): - layer_shape = get_layer_shape( - self.bioxel_size, self.orig_shape, self.orig_spacing) + layer_shape = get_layer_shape(self.bioxel_size, + self.orig_shape, + self.orig_spacing) + layer_size = get_layer_size(layer_shape, + self.bioxel_size, + self.scene_scale) + bioxel_count = layer_shape[0] * layer_shape[1] * layer_shape[2] - text = f"Shape will be: {str(layer_shape)} {bioxel_count:,} " + layer_shape_text = f"Shape will be: {str(layer_shape)} {bioxel_count:,} " if bioxel_count > 100000000: - text += "**TOO LARGE!**" + layer_shape_text += "**TOO LARGE!**" + + layer_size_text = f"Size will be: ({layer_size[0]:.2f}, {layer_size[1]:.2f}, {layer_size[2]:.2f}) m" layout = self.layout panel = layout.box() - panel.prop(self, "container_name") + if self.container == "": + panel.prop(self, "container_name") panel.prop(self, "layer_name") panel = layout.box() - panel.prop(self, "resample_method") panel.prop(self, "bioxel_size") row = panel.row() row.prop(self, "orig_spacing") - panel.label(text=text) - - panel = layout.box() - panel.prop(self, "do_orient") - - panel = layout.box() - panel.prop(self, "read_as") - if self.read_as == "labels": - ... 
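As a side note on the scalar path above: unsigned-integer volumes are mapped to a float [0, 255] range, resampled to the layer shape with the bundled skimage resize, then offset so all values are non-negative. A minimal sketch of the same steps using stock NumPy and scikit-image (the input array and target shape here are hypothetical stand-ins):

```python
import numpy as np
from skimage.transform import resize

# stand-in CT-like data; real data comes from parse_volume_data()
volume = np.random.randint(0, 4096, (512, 512, 120), dtype=np.uint16)

# map the integer range onto [0, 255] as float32
imax_in = np.iinfo(volume.dtype).max
volume = np.multiply(volume, 255.0 / imax_in, dtype=np.float32)

# resample to the computed layer shape, smoothing first to avoid aliasing
volume = resize(volume, (256, 256, 60), anti_aliasing=True)

# shift so the minimum is zero, mirroring the add-on's scalar offset
orig_min = float(volume.min())
if orig_min < 0:
    volume = volume + (-orig_min)
```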
- else: - panel.prop(self, "invert_scalar") + panel.label(text=layer_shape_text) - panel = layout.box() - panel.prop(self, "scene_scale") + if self.container == "": + panel = layout.box() + panel.prop(self, "scene_scale") + panel.label(text=layer_size_text) def get_series_ids(self, context): @@ -607,6 +549,13 @@ class ParseVolumeData(bpy.types.Operator): directory: bpy.props.StringProperty(subtype='DIR_PATH') # type: ignore container: bpy.props.StringProperty() # type: ignore + read_as: bpy.props.EnumProperty( + name="Read as", + default="scalar", + items=[("scalar", "Scalar", ""), + ("labels", "Labels", "")] + ) # type: ignore + series_id: bpy.props.EnumProperty( name="Select Series", items=get_series_ids @@ -622,31 +571,24 @@ def execute(self, context): return {'CANCELLED'} print("Collecting Meta Data...") - image, name = read_image(self.filepath, self.series_id) - - stats_table = [("Shape", str(image.GetSize())), - ("Spacing", str(image.GetSpacing())), - ("Origin", str(image.GetOrigin())), - ("Direction", str(image.GetDirection())), - ("Data Type", image.GetPixelIDTypeAsString())] - for k in image.GetMetaDataKeys(): - stats_table.append((k, str(image.GetMetaData(k)))) - - print("Meta Data:") - for stats in stats_table: - print("| {: >20} | {: >100} |".format(*stats)) - + volume, meta = parse_volume_data(self.filepath) do_orient = ext not in SEQUENCE_EXTS or ext in DICOM_EXTS - orig_shape = image.GetSize() - orig_spacing = image.GetSpacing() + orig_shape = meta['shape'] + orig_spacing = meta['spacing'] min_size = min(orig_spacing[0], orig_spacing[1], orig_spacing[2]) bioxel_size = max(min_size, 1.0) + + layer_size = get_layer_size(orig_shape, + bioxel_size) + log10 = math.floor(math.log10(max(*layer_size))) + scene_scale = math.pow(10, -log10) + if self.container: container = bpy.data.objects[self.container] container_name = container.name else: - container_name = name + container_name = meta['name'] bpy.ops.bioxelnodes.import_volume_data_dialog( 'INVOKE_DEFAULT', @@ -657,7 +599,9 @@ def execute(self, context): bioxel_size=bioxel_size, series_id=self.series_id or "", do_orient=do_orient, - container=self.container + container=self.container, + read_as=self.read_as, + scene_scale=scene_scale ) self.report({"INFO"}, "Successfully Readed.") @@ -705,21 +649,32 @@ def draw(self, context): text="Please be patient...") -class ImportVolumeData(bpy.types.Operator): - bl_idname = "bioxelnodes.import_volume_data" - bl_label = "Volume Data as Bioxel Layer" - bl_description = "Import Volume Data as Bioxel Layer" +class ImportVolumeData(): bl_options = {'UNDO'} filepath: bpy.props.StringProperty(subtype="FILE_PATH") # type: ignore directory: bpy.props.StringProperty(subtype='DIR_PATH') # type: ignore + read_as = "scalar" + def execute(self, context): - bpy.ops.bioxelnodes.parse_volume_data( - 'INVOKE_DEFAULT', - filepath=self.filepath, - directory=self.directory - ) + containers = get_container_from_selection() + + if len(containers) > 0: + bpy.ops.bioxelnodes.parse_volume_data( + 'INVOKE_DEFAULT', + filepath=self.filepath, + directory=self.directory, + container=containers[0].name, + read_as=self.read_as + ) + else: + bpy.ops.bioxelnodes.parse_volume_data( + 'INVOKE_DEFAULT', + filepath=self.filepath, + directory=self.directory + ) + return {'FINISHED'} def invoke(self, context, event): @@ -727,6 +682,20 @@ def invoke(self, context, event): return {'RUNNING_MODAL'} +class ImportAsScalarLayer(bpy.types.Operator, ImportVolumeData): + bl_idname = "bioxelnodes.import_as_scalar_layer" + bl_label = "Import 
as Scale Layer" + bl_description = "Import Volume Data to Container as Scale Layer" + read_as = "scalar" + + +class ImportAsLabelLayer(bpy.types.Operator, ImportVolumeData): + bl_idname = "bioxelnodes.import_as_label_layer" + bl_label = "Import as Label Layer" + bl_description = "Import Volume Data to Container as Label Layer" + read_as = "labels" + + try: class BIOXELNODES_FH_ImportVolumeData(bpy.types.FileHandler): bl_idname = "BIOXELNODES_FH_ImportVolumeData" @@ -743,8 +712,8 @@ def poll_drop(cls, context): class AddVolumeData(bpy.types.Operator): bl_idname = "bioxelnodes.add_volume_data" - bl_label = "Add Volume Data to Container" - bl_description = "Add Volume Data to Container" + bl_label = "Import as Bioxel Layer" + bl_description = "Import additional Volume Data to Container" bl_options = {'UNDO'} filepath: bpy.props.StringProperty(subtype="FILE_PATH") # type: ignore @@ -752,17 +721,17 @@ class AddVolumeData(bpy.types.Operator): @classmethod def poll(cls, context): - container = get_container(bpy.context.active_object) - return True if container else False + containers = get_container_from_selection() + return len(containers) > 0 def execute(self, context): - container = get_container(bpy.context.active_object) + containers = get_container_from_selection() bpy.ops.bioxelnodes.parse_volume_data( 'INVOKE_DEFAULT', filepath=self.filepath, directory=self.directory, - container=container.name + container=containers[0].name ) return {'FINISHED'} @@ -771,9 +740,9 @@ def invoke(self, context, event): return {'RUNNING_MODAL'} -class ExportVDB(bpy.types.Operator): - bl_idname = "bioxelnodes.export_vdb" - bl_label = "Bioxel Layer as VDB" +class ExportVolumeData(bpy.types.Operator): + bl_idname = "bioxelnodes.export_volume_data" + bl_label = "Export Layer" bl_description = "Export Bioxel Layer as VDB" bl_options = {'UNDO'} diff --git a/bioxelnodes/menus.py b/bioxelnodes/menus.py index e1c937e..2ee7ea7 100644 --- a/bioxelnodes/menus.py +++ b/bioxelnodes/menus.py @@ -1,67 +1,128 @@ import bpy -from .operators import ConvertToMesh, JoinLayers -from .io import ExportVDB, ImportVolumeData, AddVolumeData -from .misc import SaveBioxelLayers +from .operators import (AddPlaneCutter, AddCylinderCutter, AddCubeCutter, AddSphereCutter, CombineLabels, + ConvertToMesh, InvertScalar, FillByLabel, FillByThreshold, FillByRange) +from .io import ExportVolumeData, ImportAsLabelLayer, ImportAsScalarLayer, ImportVolumeData, AddVolumeData +from .misc import SaveLayers -class View3DBioxelsMenu(bpy.types.Menu): +class ModifyLayer(bpy.types.Menu): + bl_idname = "BIOXELNODES_MT_MODIFY_LAYERS" + bl_label = "Modify Layer" + + def draw(self, context): + layout = self.layout + layout.operator(InvertScalar.bl_idname) + layout.operator(FillByThreshold.bl_idname) + layout.operator(FillByRange.bl_idname) + layout.operator(FillByLabel.bl_idname) + layout.operator(CombineLabels.bl_idname) + + +class AddCutterMenu(bpy.types.Menu): + bl_idname = "BIOXELNODES_MT_CUTTERS" + bl_label = "Add Cutter" + + def draw(self, context): + layout = self.layout + layout.operator(AddPlaneCutter.bl_idname) + layout.operator(AddCylinderCutter.bl_idname) + layout.operator(AddCubeCutter.bl_idname) + layout.operator(AddSphereCutter.bl_idname) + + +class ImportLayerMenu(bpy.types.Menu): + bl_idname = "BIOXELNODES_MT_LAYERS" + bl_label = "Import Layer" + + def draw(self, context): + layout = self.layout + layout.operator(ImportAsScalarLayer.bl_idname) + layout.operator(ImportAsLabelLayer.bl_idname) + + +class BioxelNodesView3DMenu(bpy.types.Menu): 
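    # Root of the 3D Viewport context menu: layer import, cutters,
    # mesh conversion, and saving layers.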
bl_idname = "BIOXELNODES_MT_VIEW3D" bl_label = "Bioxel Nodes" def draw(self, context): layout = self.layout + layout.menu(ImportLayerMenu.bl_idname) + layout.menu(AddCutterMenu.bl_idname) layout.operator(ConvertToMesh.bl_idname) - layout.operator(AddVolumeData.bl_idname) layout.separator() - layout.operator(SaveBioxelLayers.bl_idname) + layout.operator(SaveLayers.bl_idname) -class OutlinerBioxelsMenu(bpy.types.Menu): +class BioxelNodesOutlinerMenu(bpy.types.Menu): bl_idname = "BIOXELNODES_MT_OUTLINER" bl_label = "Bioxel Nodes" def draw(self, context): layout = self.layout - layout.operator(JoinLayers.bl_idname) + layout.menu(ImportLayerMenu.bl_idname) + layout.menu(AddCutterMenu.bl_idname) layout.operator(ConvertToMesh.bl_idname) - layout.operator(AddVolumeData.bl_idname) layout.separator() - layout.operator(SaveBioxelLayers.bl_idname) + layout.menu(ModifyLayer.bl_idname) + layout.separator() + layout.operator(SaveLayers.bl_idname) -def TOPBAR_FILE_IMPORT(self, context): - layout = self.layout - layout.separator() - layout.operator(ImportVolumeData.bl_idname) +# def TOPBAR_FILE_IMPORT(self, context): +# layout = self.layout +# layout.separator() +# layout.operator(ImportVolumeData.bl_idname) -def TOPBAR_FILE_EXPORT(self, context): - layout = self.layout - layout.separator() - layout.operator(ExportVDB.bl_idname) +# def TOPBAR_FILE_EXPORT(self, context): +# layout = self.layout +# layout.separator() +# layout.operator(ExportVolumeData.bl_idname) def VIEW3D_OBJECT(self, context): layout = self.layout - layout.menu(View3DBioxelsMenu.bl_idname) + layout.menu(BioxelNodesView3DMenu.bl_idname) layout.separator() def OUTLINER_OBJECT(self, context): layout = self.layout - layout.menu(OutlinerBioxelsMenu.bl_idname) + layout.menu(BioxelNodesOutlinerMenu.bl_idname) layout.separator() +class BioxelNodesTopbarMenu(bpy.types.Menu): + bl_idname = "BIOXELNODES_MT_TOPBAR" + bl_label = "Bioxel Nodes" + + def draw(self, context): + layout = self.layout + layout.menu(ImportLayerMenu.bl_idname) + layout.operator(ExportVolumeData.bl_idname) + layout.separator() + layout.menu(AddCutterMenu.bl_idname) + layout.operator(ConvertToMesh.bl_idname) + layout.separator() + layout.operator(SaveLayers.bl_idname) + + +def TOPBAR(self, context): + layout = self.layout + layout.menu(BioxelNodesTopbarMenu.bl_idname) + + def add(): - bpy.types.TOPBAR_MT_file_import.append(TOPBAR_FILE_IMPORT) - bpy.types.TOPBAR_MT_file_export.append(TOPBAR_FILE_EXPORT) + # bpy.types.TOPBAR_MT_file_import.append(TOPBAR_FILE_IMPORT) + # bpy.types.TOPBAR_MT_file_export.append(TOPBAR_FILE_EXPORT) bpy.types.OUTLINER_MT_object.prepend(OUTLINER_OBJECT) bpy.types.VIEW3D_MT_object_context_menu.prepend(VIEW3D_OBJECT) + bpy.types.TOPBAR_MT_editor_menus.append(TOPBAR) def remove(): - bpy.types.TOPBAR_MT_file_import.remove(TOPBAR_FILE_IMPORT) - bpy.types.TOPBAR_MT_file_export.remove(TOPBAR_FILE_EXPORT) + # bpy.types.TOPBAR_MT_file_import.remove(TOPBAR_FILE_IMPORT) + # bpy.types.TOPBAR_MT_file_export.remove(TOPBAR_FILE_EXPORT) bpy.types.OUTLINER_MT_object.remove(OUTLINER_OBJECT) bpy.types.VIEW3D_MT_object_context_menu.remove(VIEW3D_OBJECT) + bpy.types.TOPBAR_MT_editor_menus.remove(TOPBAR) diff --git a/bioxelnodes/misc.py b/bioxelnodes/misc.py index bbc8fba..9f42ed0 100644 --- a/bioxelnodes/misc.py +++ b/bioxelnodes/misc.py @@ -22,9 +22,9 @@ def save_layer(layer, output_dir): str(output_path), start=str(blend_path)) -class SaveBioxelLayers(bpy.types.Operator): - bl_idname = "bioxelnodes.save_bioxel_layers" - bl_label = "Save Bioxel Layers" +class 
SaveLayers(bpy.types.Operator): + bl_idname = "bioxelnodes.save_layers" + bl_label = "Save Layers" bl_description = "Save Bioxel Layers to Directory." bl_options = {'UNDO'} @@ -52,9 +52,9 @@ def execute(self, context): return {'FINISHED'} -class SaveAllBioxelLayers(bpy.types.Operator): - bl_idname = "bioxelnodes.save_all_bioxel_layers" - bl_label = "Save All Bioxel Layers" +class SaveAllLayers(bpy.types.Operator): + bl_idname = "bioxelnodes.save_all_layers" + bl_label = "Save All Layers" bl_description = "Save All Bioxel Layers to Directory." bl_options = {'UNDO'} @@ -89,4 +89,4 @@ def draw(self, context): layout = self.layout scene = context.scene layout.prop(scene, 'bioxel_layer_dir') - layout.operator(SaveAllBioxelLayers.bl_idname) + layout.operator(SaveAllLayers.bl_idname) diff --git a/bioxelnodes/nodes.py b/bioxelnodes/nodes.py index af6633e..da4c86e 100644 --- a/bioxelnodes/nodes.py +++ b/bioxelnodes/nodes.py @@ -3,39 +3,57 @@ from .customnodes import CustomNodes import bpy +# def set_object_to_node_factory(object_type:str): +# if object_type == "plane": +# create_object = """ +# bpy.ops.mesh.primitive_plane_add(size=2, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) +# """ +# callback_str = f""" +# import bpy +# from ..utils import get_container +# container = get_container(bpy.context.active_object) +# if container: +# {create_object} +# object = bpy.context.active_object +# node.inputs[0].default_value = object +# else: +# print('Cannot find any Bioxel Container.') +# """ +# return callback_str -def add_driver_to_node_factory(source_prop, target_prop): - callback_str = f""" -import bpy -from .utils import add_direct_driver, get_bioxels_obj -bioxels_obj = get_bioxels_obj(bpy.context.active_object) -if bioxels_obj: - container_obj = bioxels_obj.parent - add_direct_driver( - target=node, - target_prop='{target_prop}', - source=container_obj, - source_prop='{source_prop}' - ) -else: - print('Cannot find any bioxels.') - """ - return callback_str +# def add_driver_to_node_factory(source_prop, target_prop): +# callback_str = f""" +# import bpy +# from .utils import add_direct_driver, get_bioxels_obj +# bioxels_obj = get_bioxels_obj(bpy.context.active_object) +# if bioxels_obj: +# container_obj = bioxels_obj.parent +# add_direct_driver( +# target=node, +# target_prop='{target_prop}', +# source=container_obj, +# source_prop='{source_prop}' +# ) +# else: +# print('Cannot find any bioxels.') +# """ +# return callback_str -def set_prop_to_node_factory(source_prop, target_prop): - callback_str = f""" -import bpy -from .utils import get_bioxels_obj -bioxels_obj = get_bioxels_obj(bpy.context.active_object) -if bioxels_obj: - container_obj = bioxels_obj.parent - node.inputs.get('{target_prop}').default_value = container_obj.get( - '{source_prop}') -else: - print('Cannot find any bioxels.') - """ - return callback_str + +# def set_prop_to_node_factory(source_prop, target_prop): +# callback_str = f""" +# import bpy +# from .utils import get_bioxels_obj +# bioxels_obj = get_bioxels_obj(bpy.context.active_object) +# if bioxels_obj: +# container_obj = bioxels_obj.parent +# node.inputs.get('{target_prop}').default_value = container_obj.get( +# '{source_prop}') +# else: +# print('Cannot find any bioxels.') +# """ +# return callback_str if bpy.app.version >= (4, 1, 0): @@ -70,6 +88,12 @@ def set_prop_to_node_factory(source_prop, target_prop): 'label': 'Shaders', 'icon': 'SHADING_RENDERED', 'items': [ + { + 'label': 'Membrane Shader', + 'icon': 'NODE_MATERIAL', + 'node_type': 
'BioxelNodes_MembraneShader', + 'node_description': '' + }, { 'label': 'Solid Shader', 'icon': 'SHADING_SOLID', @@ -78,19 +102,19 @@ def set_prop_to_node_factory(source_prop, target_prop): }, { 'label': 'Slime Shader', - 'icon': 'OUTLINER_OB_META', + 'icon': 'OUTLINER_DATA_META', 'node_type': 'BioxelNodes_AssignSlimeShader', 'node_description': '' }, { 'label': 'Volume Shader', - 'icon': 'OUTLINER_OB_VOLUME', + 'icon': 'VOLUME_DATA', 'node_type': 'BioxelNodes_AssignVolumeShader', 'node_description': '' }, { 'label': 'Universal Shader', - 'icon': 'SHADING_RENDERED', + 'icon': 'MATSHADERBALL', 'node_type': 'BioxelNodes_AssignUniversalShader', 'node_description': '' } @@ -106,6 +130,7 @@ def set_prop_to_node_factory(source_prop, target_prop): 'node_type': 'BioxelNodes_SetColorPresets', 'node_description': '' }, + "separator", { 'label': 'Color Ramp 2', 'icon': 'IPO_QUAD', @@ -145,14 +170,26 @@ def set_prop_to_node_factory(source_prop, target_prop): "separator", { 'label': 'Plane Cutter', - 'icon': 'MOD_LATTICE', - 'node_type': 'BioxelNodes_PlaneCutter', + 'icon': 'MESH_PLANE', + 'node_type': 'BioxelNodes_PlaneObjectCutter', + 'node_description': '' + }, + { + 'label': 'Cylinder Cutter', + 'icon': 'MESH_CYLINDER', + 'node_type': 'BioxelNodes_CylinderObjectCutter', 'node_description': '', }, { - 'label': 'Plane Object Cutter', - 'icon': 'OUTLINER_OB_LATTICE', - 'node_type': 'BioxelNodes_PlaneObjectCutter', + 'label': 'Cube Cutter', + 'icon': 'MESH_CUBE', + 'node_type': 'BioxelNodes_CubeObjectCutter', + 'node_description': '', + }, + { + 'label': 'Sphere Cutter', + 'icon': 'MESH_UVSPHERE', + 'node_type': 'BioxelNodes_SphereObjectCutter', 'node_description': '', } ] @@ -166,6 +203,44 @@ def set_prop_to_node_factory(source_prop, target_prop): 'icon': 'CONSTRAINT_BONE', 'node_type': 'BioxelNodes_JoinComponent', 'node_description': '' + }, + "separator", + { + 'label': 'To Mesh', + 'icon': 'OUTLINER_OB_MESH', + 'node_type': 'BioxelNodes_ToMesh', + 'node_description': '' + }, + { + 'label': 'To Volume', + 'icon': 'OUTLINER_OB_VOLUME', + 'node_type': 'BioxelNodes_ToVolume', + 'node_description': '' + }, + { + 'label': 'To Bbox Wire', + 'icon': 'MESH_CUBE', + 'node_type': 'BioxelNodes_ToBboxWire', + 'node_description': '' + }, + "separator", + { + 'label': 'Inflate', + 'icon': 'OUTLINER_OB_META', + 'node_type': 'META_DATA', + 'node_description': '' + }, + { + 'label': 'Smooth', + 'icon': 'MOD_SMOOTH', + 'node_type': 'BioxelNodes_M_Smooth', + 'node_description': '' + }, + { + 'label': 'Remove Small Island', + 'icon': 'FORCE_LENNARDJONES', + 'node_type': 'BioxelNodes_M_RemoveSmallIsland', + 'node_description': '' } ] } @@ -271,14 +346,26 @@ def set_prop_to_node_factory(source_prop, target_prop): "separator", { 'label': 'Plane Cutter', - 'icon': 'MOD_LATTICE', - 'node_type': 'BioxelNodes_PlaneCutter', + 'icon': 'MESH_PLANE', + 'node_type': 'BioxelNodes_PlaneObjectCutter', + 'node_description': '' + }, + { + 'label': 'Cylinder Cutter', + 'icon': 'MESH_CYLINDER', + 'node_type': 'BioxelNodes_CylinderObjectCutter', 'node_description': '', }, { - 'label': 'Plane Object Cutter', - 'icon': 'OUTLINER_OB_LATTICE', - 'node_type': 'BioxelNodes_PlaneObjectCutter', + 'label': 'Cube Cutter', + 'icon': 'MESH_CUBE', + 'node_type': 'BioxelNodes_CubeObjectCutter', + 'node_description': '', + }, + { + 'label': 'Sphere Cutter', + 'icon': 'MESH_UVSPHERE', + 'node_type': 'BioxelNodes_SphereObjectCutter', 'node_description': '', } ] diff --git a/bioxelnodes/operators.py b/bioxelnodes/operators.py index 4e95966..dd38a64 100644 
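The reworked operators.py below edits layer voxels through a grid/array round-trip: voxels are pulled out of the cached OpenVDB file into NumPy, modified, and written back. A minimal sketch of that pattern with pyopenvdb (file names and shape are hypothetical):

```python
import numpy as np
import pyopenvdb as vdb

# read all grids from a cached layer file
grids, metadata = vdb.readAll("layer.vdb")

# densify the first grid into an array of known layer shape
shape = (64, 64, 64)
volume = np.zeros(shape, dtype=np.float32)
grids[0].copyToArray(volume)

# modify the voxels, e.g. invert the scalar field
volume = np.max(volume) - volume

# write the result back into the grid and save a new cache
grids[0].clear()
grids[0].copyFromArray(volume)
vdb.write("layer_inverted.vdb", grids=grids)
```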
--- a/bioxelnodes/operators.py +++ b/bioxelnodes/operators.py @@ -2,16 +2,146 @@ import random import bpy import pyopenvdb as vdb -from uuid import uuid4 +import numpy as np + +from . import skimage as ski +from . import scipy from .nodes import custom_nodes -from .utils import (get_container, get_container_layers, - get_layer, get_node_by_type, hide_in_ray, lock_transform) +from .utils import (get_container, get_container_from_selection, get_container_layers, + get_layer, get_node_by_type, hide_in_ray, lock_transform, save_vdb) + + +def get_layer_name(layer): + container = layer.parent + return layer.name.removeprefix(container.name).replace("_", "") + + +def get_grids(layer): + layer_dir = bpy.path.abspath(layer.data.filepath) + grids, base_metadata = vdb.readAll(layer_dir) + return grids + + +def set_volume(grids, index, volume): + grids[index].clear() + grids[index].copyFromArray(volume.copy().astype(np.float32)) + + +def get_volume(grids, index, shape): + volume = np.ndarray(shape, np.float32) + volume.fill(index) + grids[0].copyToArray(volume) + return volume + + +def get_shape(layer): + layer_node = layer.modifiers[0].node_group.nodes['BioxelNodes__ConvertToLayer'] + return [int(a) + for a in layer_node.inputs['Shape'].default_value] + + +def get_layer_meta(layer, key: str): + layer_node = layer.modifiers[0].node_group.nodes['BioxelNodes__ConvertToLayer'] + return layer_node.inputs[key].default_value + + +def set_layer_meta(layer, key: str, value): + layer_node = layer.modifiers[0].node_group.nodes['BioxelNodes__ConvertToLayer'] + layer_node.inputs[key].default_value = value + + +def add_mask_node(container, layer, node_type: str, node_label: str): + modifier = container.modifiers[0] + node_tree = modifier.node_group + + # Deselect all nodes first + for node in node_tree.nodes: + if node.select: + node.select = False + + mask_node = custom_nodes.add_node(node_tree.nodes, node_type) + mask_node.label = node_label + mask_node.inputs[0].default_value = layer + + # Connect to output if no output linked + output_node = get_node_by_type(node_tree.nodes, + 'NodeGroupOutput')[0] + if len(output_node.inputs[0].links) == 0: + node_tree.links.new(mask_node.outputs[0], + output_node.inputs[0]) + + return mask_node + + +def deep_copy_layer(vdb_path, base_layer, name): + # Read VDB + print(f"Loading the cache to Blender scene...") + bpy.ops.object.volume_import( + filepath=str(vdb_path), align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) + + copyed_layer = bpy.context.active_object + + # Set props to VDB object + copyed_layer.name = name + copyed_layer.data.name = name + + lock_transform(copyed_layer) + hide_in_ray(copyed_layer) + copyed_layer.hide_select = True + copyed_layer.hide_render = True + copyed_layer.hide_viewport = True + copyed_layer.data.display.use_slice = True + copyed_layer.data.display.density = 1e-05 + + copyed_layer['bioxel_layer'] = True + copyed_layer['bioxel_layer_type'] = base_layer['bioxel_layer_type'] + copyed_layer.parent = base_layer.parent + + for collection in copyed_layer.users_collection: + collection.objects.unlink(copyed_layer) + + for collection in base_layer.users_collection: + collection.objects.link(copyed_layer) + + # add convert to layer node + base_layer_node = base_layer.modifiers[0].node_group.nodes['BioxelNodes__ConvertToLayer'] + + bioxel_size = base_layer_node.inputs['Bioxel Size'].default_value + layer_shape = base_layer_node.inputs['Shape'].default_value + layer_origin = base_layer_node.inputs['Origin'].default_value + layer_rotation = 
base_layer_node.inputs['Rotation'].default_value + scalar_offset = base_layer_node.inputs['Scalar Offset'].default_value + scalar_min = base_layer_node.inputs['Scalar Min'].default_value + scalar_max = base_layer_node.inputs['Scalar Max'].default_value + + bpy.ops.node.new_geometry_nodes_modifier() + node_tree = copyed_layer.modifiers[0].node_group + + input_node = get_node_by_type(node_tree.nodes, 'NodeGroupInput')[0] + output_node = get_node_by_type(node_tree.nodes, 'NodeGroupOutput')[0] + + copyed_layer_node = custom_nodes.add_node(node_tree.nodes, + "BioxelNodes__ConvertToLayer") + + node_tree.links.new(input_node.outputs[0], copyed_layer_node.inputs[0]) + node_tree.links.new(copyed_layer_node.outputs[0], output_node.inputs[0]) + + copyed_layer_node.inputs['Layer ID'].default_value = random.randint(-200000000, + 200000000) + copyed_layer_node.inputs['Bioxel Size'].default_value = bioxel_size + copyed_layer_node.inputs['Shape'].default_value = layer_shape + copyed_layer_node.inputs['Origin'].default_value = layer_origin + copyed_layer_node.inputs['Rotation'].default_value = layer_rotation + copyed_layer_node.inputs['Scalar Offset'].default_value = scalar_offset + copyed_layer_node.inputs['Scalar Min'].default_value = scalar_min + copyed_layer_node.inputs['Scalar Max'].default_value = scalar_max + + return copyed_layer def get_scalar_layer_selection(self, context): items = [("None", "None", "")] - base_layer = bpy.data.objects[self.base_layer] - container = base_layer.parent + container = get_container(bpy.context.active_object) for layer in get_container_layers(container): if layer.get("bioxel_layer_type") == "scalar": items.append(( @@ -25,8 +155,7 @@ def get_scalar_layer_selection(self, context): def get_label_layer_selection(self, context): items = [("None", "None", "")] - base_layer = bpy.data.objects[self.base_layer] - container = base_layer.parent + container = get_container(bpy.context.active_object) for layer in get_container_layers(container): if layer.get("bioxel_layer_type") == "label": items.append(( @@ -38,197 +167,556 @@ def get_label_layer_selection(self, context): return items -class JoinLayers(bpy.types.Operator): - bl_idname = "bioxelnodes.join_layers" - bl_label = "Join Additinal Layers" - bl_description = "Join Layers" +# class JoinLayers(bpy.types.Operator): +# bl_idname = "bioxelnodes.join_layers" +# bl_label = "Join Bioxel Layers" +# bl_description = "Join Additional Bioxel Layers" +# bl_options = {'UNDO'} + +# base_layer: bpy.props.StringProperty( +# options={"HIDDEN"} +# ) # type: ignore + +# scalar_layer: bpy.props.EnumProperty( +# name="Scaler Layer", +# items=get_scalar_layer_selection +# ) # type: ignore + +# label_layer: bpy.props.EnumProperty( +# name="Label Layer", +# items=get_label_layer_selection +# ) # type: ignore +# # color_layer: bpy.props.StringProperty() # type: ignore + +# @classmethod +# def poll(cls, context): +# layer = get_layer(bpy.context.active_object) +# return True if layer else False + +# def execute(self, context): +# base_layer = bpy.data.objects[self.base_layer] + +# if not base_layer: +# self.report({"WARNING"}, "Cannot find any bioxel layer as base.") +# return {'FINISHED'} + +# base_layer_dir = bpy.path.abspath(base_layer.data.filepath) +# base_grids, base_metadata = vdb.readAll(base_layer_dir) + +# layers = [] +# if self.scalar_layer != "None": +# scalar_layer = bpy.data.objects[self.scalar_layer] +# layers.append(scalar_layer) + +# if self.label_layer != "None": +# label_layer = bpy.data.objects[self.label_layer] +# 
layers.append(label_layer) + +# # TODO: add color and vector + +# if len(layers) == 0: +# self.report({"WARNING"}, "No additinal layers setted.") +# return {'FINISHED'} + +# for layer in layers: +# layer_dir = bpy.path.abspath(layer.data.filepath) +# grids, metadata = vdb.readAll(layer_dir) +# base_grids.extend(grids) + +# vdb_path = save_vdb(base_grids, context) + +# joined_layer = deep_copy_layer(vdb_path, +# base_layer, +# f"{base_layer.name}_Joined") + +# return {'FINISHED'} + +# def invoke(self, context, event): +# base_layer = get_layer(bpy.context.active_object) +# self.base_layer = base_layer.name +# context.window_manager.invoke_props_dialog(self, width=400) +# return {'RUNNING_MODAL'} + + +class InvertScalar(bpy.types.Operator): + bl_idname = "bioxelnodes.invert_scalar" + bl_label = "Invert Scalar" + bl_description = "Invert the scalar value" + bl_options = {'UNDO'} + + @classmethod + def poll(cls, context): + layer = get_layer(bpy.context.active_object) + if layer: + return layer.get("bioxel_layer_type") == "scalar" + else: + return False + + def execute(self, context): + base_layer = get_layer(bpy.context.active_object) + + container = base_layer.parent + inverted_layer_name = f"{get_layer_name(base_layer)}_Inverted" + + base_shape = get_shape(base_layer) + scalar_offset = get_layer_meta(base_layer, "Scalar Offset") + + base_grids = get_grids(base_layer) + base_volume = get_volume(base_grids, 0, base_shape) + + base_volume = -(base_volume - scalar_offset) + + base_min = float(np.min(base_volume)) + base_max = float(np.max(base_volume)) + + scalar_offset = 0 + if base_min < 0: + scalar_offset = -base_min + base_volume = base_volume + scalar_offset + + set_volume(base_grids, 0, base_volume) + vdb_path = save_vdb(base_grids, context) + inverted_layer = deep_copy_layer(vdb_path, + base_layer, + f"{container.name}_{inverted_layer_name}") + set_layer_meta(inverted_layer, 'Scalar Offset', scalar_offset) + set_layer_meta(inverted_layer, 'Scalar Min', base_min) + set_layer_meta(inverted_layer, 'Scalar Max', base_max) + + add_mask_node(container, + inverted_layer, + "BioxelNodes_MaskByThreshold", + inverted_layer_name) + + bpy.context.view_layer.objects.active = container + + return {'FINISHED'} + + +class FillByThreshold(bpy.types.Operator): + bl_idname = "bioxelnodes.fill_by_threshold" + bl_label = "Fill by Threshold" + bl_description = "Fill Value by Threshold" bl_options = {'UNDO'} - base_layer: bpy.props.StringProperty( - options={"HIDDEN"} - ) # type: ignore + threshold: bpy.props.FloatProperty( + name="Threshold", + soft_min=0, soft_max=1024, + default=128, + ) # type: ignore - scalar_layer: bpy.props.EnumProperty( - name="Scaler Layer", - items=get_scalar_layer_selection + fill_value: bpy.props.FloatProperty( + name="Fill Value", + soft_min=0, soft_max=1024.0, + default=0, ) # type: ignore - label_layer: bpy.props.EnumProperty( - name="Label Layer", - items=get_label_layer_selection + invert: bpy.props.BoolProperty( + name="Invert Area", + default=True, ) # type: ignore - # color_layer: bpy.props.StringProperty() # type: ignore @classmethod def poll(cls, context): layer = get_layer(bpy.context.active_object) - return True if layer else False + if layer: + return layer.get("bioxel_layer_type") == "scalar" + else: + return False def execute(self, context): - base_layer = bpy.data.objects[self.base_layer] + base_layer = get_layer(bpy.context.active_object) - if not base_layer: - self.report({"WARNING"}, "Cannot find any bioxel layer as base.") - return {'FINISHED'} + container = 
base_layer.parent + filled_layer_name = f"{get_layer_name(base_layer)}_{self.threshold}-Filled" + scalar_offset = get_layer_meta(base_layer, "Scalar Offset") + base_shape = get_shape(base_layer) + + base_grids = get_grids(base_layer) + base_volume = get_volume(base_grids, 0, base_shape) + mask = base_volume > (self.threshold + scalar_offset) + mask = scipy.median_filter(mask.astype(np.float32), size=2) + if self.invert: + base_volume = mask * base_volume + \ + (1-mask) * (self.fill_value + scalar_offset) + else: + base_volume = (1-mask) * base_volume + \ + mask * (self.fill_value + scalar_offset) + + set_volume(base_grids, 0, base_volume) + vdb_path = save_vdb(base_grids, context) + filled_layer = deep_copy_layer(vdb_path, + base_layer, + f"{container.name}_{filled_layer_name}") + mask_node = add_mask_node(container, + filled_layer, + "BioxelNodes_MaskByThreshold", + filled_layer_name) + + mask_node.inputs[1].default_value = self.threshold + + bpy.context.view_layer.objects.active = container + + return {'FINISHED'} + + def invoke(self, context, event): + base_layer = get_layer(bpy.context.active_object) + scalar_min = get_layer_meta(base_layer, "Scalar Min") + self.fill_value = scalar_min + context.window_manager.invoke_props_dialog(self, width=400) + return {'RUNNING_MODAL'} - base_layer_dir = bpy.path.abspath(base_layer.data.filepath) - base_grids, base_metadata = vdb.readAll(base_layer_dir) - preferences = context.preferences.addons[__package__].preferences - cache_dir = Path(preferences.cache_dir, 'VDBs') - cache_dir.mkdir(parents=True, exist_ok=True) +class FillByRange(bpy.types.Operator): + bl_idname = "bioxelnodes.fill_by_range" + bl_label = "Fill by Range" + bl_description = "Fill Value by Range" + bl_options = {'UNDO'} - layers = [] - if self.scalar_layer != "None": - scalar_layer = bpy.data.objects[self.scalar_layer] - layers.append(scalar_layer) + from_min: bpy.props.FloatProperty( + name="From Min", + soft_min=0, soft_max=1024, + default=128, + ) # type: ignore - if self.label_layer != "None": - label_layer = bpy.data.objects[self.label_layer] - layers.append(label_layer) + from_max: bpy.props.FloatProperty( + name="From Max", + soft_min=0, soft_max=1024, + default=256, + ) # type: ignore - # TODO: add color and vector + fill_value: bpy.props.FloatProperty( + name="Fill Value", + soft_min=0, soft_max=1024.0, + default=0, + ) # type: ignore - if len(layers) == 0: - self.report({"WARNING"}, "No additinal layers setted.") - return {'FINISHED'} + invert: bpy.props.BoolProperty( + name="Invert Area", + default=True, + ) # type: ignore - for layer in layers: - layer_dir = bpy.path.abspath(layer.data.filepath) - grids, metadata = vdb.readAll(layer_dir) - base_grids.extend(grids) - - vdb_path = Path(cache_dir, f"{uuid4()}.vdb") - print(f"Storing the VDB file ({str(vdb_path)})...") - vdb.write(str(vdb_path), grids=base_grids) - - # Read VDB - print(f"Loading the cache to Blender scene...") - bpy.ops.object.volume_import( - filepath=str(vdb_path), align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) - - joined_layer = bpy.context.active_object - - # Set props to VDB object - joined_layer.name = f"{base_layer.name}_Joined" - joined_layer.data.name = f"{base_layer.name}_Joined" - - lock_transform(joined_layer) - hide_in_ray(joined_layer) - joined_layer.hide_select = True - joined_layer.hide_render = True - joined_layer.hide_viewport = True - joined_layer.data.display.use_slice = True - joined_layer.data.display.density = 1e-05 - - joined_layer['bioxel_layer'] = True - joined_layer.parent = 
base_layer.parent - - for collection in layer.users_collection: - collection.objects.unlink(layer) - - for collection in base_layer.users_collection: - collection.objects.link(layer) - - # add convert to layer node - base_layer_node = base_layer.modifiers[0].node_group.nodes['BioxelNodes__ConvertToLayer'] - bioxel_size = base_layer_node.inputs['Bioxel Size'].default_value - layer_shape = base_layer_node.inputs['Shape'].default_value - layer_origin = base_layer_node.inputs['Origin'].default_value - layer_rotation = base_layer_node.inputs['Rotation'].default_value - scalar_offset = base_layer_node.inputs['Scalar Offset'].default_value - scalar_min = base_layer_node.inputs['Scalar Min'].default_value - scalar_max = base_layer_node.inputs['Scalar Max'].default_value - - if self.scalar_layer != "None": - scalar_layer_node = scalar_layer.modifiers[0].node_group.nodes['BioxelNodes__ConvertToLayer'] - scalar_offset = scalar_layer_node.inputs['Scalar Offset'].default_value - scalar_min = scalar_layer_node.inputs['Scalar Min'].default_value - scalar_max = scalar_layer_node.inputs['Scalar Max'].default_value + @classmethod + def poll(cls, context): + layer = get_layer(bpy.context.active_object) + if layer: + return layer.get("bioxel_layer_type") == "scalar" + else: + return False - bpy.ops.node.new_geometry_nodes_modifier() - node_tree = joined_layer.modifiers[0].node_group - nodes = node_tree.nodes - links = node_tree.links + def execute(self, context): + base_layer = get_layer(bpy.context.active_object) + + container = base_layer.parent + filled_layer_name = f"{get_layer_name(base_layer)}_{self.from_min}-{self.from_max}-Filled" + scalar_offset = get_layer_meta(base_layer, "Scalar Offset") + base_shape = get_shape(base_layer) + + base_grids = get_grids(base_layer) + base_volume = get_volume(base_grids, 0, base_shape) + + mask = (base_volume > self.from_min + scalar_offset) \ + & (base_volume < self.from_max + scalar_offset) + mask = scipy.median_filter(mask.astype(np.float32), size=2) + + if self.invert: + base_volume = mask * base_volume + \ + (1-mask) * (self.fill_value + scalar_offset) + else: + base_volume = (1-mask) * base_volume + \ + mask * (self.fill_value + scalar_offset) + + set_volume(base_grids, 0, base_volume) + vdb_path = save_vdb(base_grids, context) + filled_layer = deep_copy_layer(vdb_path, + base_layer, + f"{container.name}_{filled_layer_name}") + mask_node = add_mask_node(container, + filled_layer, + "BioxelNodes_MaskByThreshold", + filled_layer_name) + + mask_node.inputs[1].default_value = self.from_min - input_node = get_node_by_type(nodes, 'NodeGroupInput')[0] - output_node = get_node_by_type(nodes, 'NodeGroupOutput')[0] + bpy.context.view_layer.objects.active = container + + return {'FINISHED'} + + def invoke(self, context, event): + base_layer = get_layer(bpy.context.active_object) + scalar_min = get_layer_meta(base_layer, "Scalar Min") + self.fill_value = scalar_min + context.window_manager.invoke_props_dialog(self, width=400) + return {'RUNNING_MODAL'} + + +class FillByLabel(bpy.types.Operator): + bl_idname = "bioxelnodes.fill_by_label" + bl_label = "Fill by Label" + bl_description = "Fill Value by Label Area" + bl_options = {'UNDO'} + + label_layer: bpy.props.EnumProperty( + name="Label Layer", + items=get_label_layer_selection + ) # type: ignore + + fill_value: bpy.props.FloatProperty( + name="Fill Value", + soft_min=0, soft_max=1024.0, + default=0, + ) # type: ignore + + invert: bpy.props.BoolProperty( + name="Invert Label", + default=True, + ) # type: ignore + + 
@classmethod
+    def poll(cls, context):
+        layer = get_layer(bpy.context.active_object)
+        if layer:
+            return layer.get("bioxel_layer_type") == "scalar"
+        else:
+            return False
 
-        joined_layer_node = custom_nodes.add_node(
-            nodes, "BioxelNodes__ConvertToLayer")
+    def execute(self, context):
+        base_layer = get_layer(bpy.context.active_object)
+        label_layer = bpy.data.objects[self.label_layer]
 
-        links.new(input_node.outputs[0], joined_layer_node.inputs[0])
-        links.new(joined_layer_node.outputs[0], output_node.inputs[0])
+        if not label_layer:
+            self.report({"WARNING"}, "Cannot find any label layer.")
+            return {'FINISHED'}
 
-        joined_layer_node.inputs['Layer ID'].default_value = random.randint(-200000000,
-                                                                            200000000)
-        joined_layer_node.inputs['Bioxel Size'].default_value = bioxel_size
-        joined_layer_node.inputs['Shape'].default_value = layer_shape
-        joined_layer_node.inputs['Origin'].default_value = layer_origin
-        joined_layer_node.inputs['Rotation'].default_value = layer_rotation
-        joined_layer_node.inputs['Scalar Offset'].default_value = scalar_offset
-        joined_layer_node.inputs['Scalar Min'].default_value = scalar_min
-        joined_layer_node.inputs['Scalar Max'].default_value = scalar_max
+        container = base_layer.parent
+        filled_layer_name = f"{get_layer_name(base_layer)}_{get_layer_name(label_layer)}-Filled"
+        scalar_offset = get_layer_meta(base_layer, "Scalar Offset")
+        base_shape = get_shape(base_layer)
+        label_shape = get_shape(label_layer)
+
+        base_grids = get_grids(base_layer)
+        base_volume = get_volume(base_grids, 0, base_shape)
+
+        # Resample the label mask to the scalar layer's shape before blending.
+        label_grids = get_grids(label_layer)
+        mask = get_volume(label_grids, 0, label_shape)
+        mask = ski.resize(mask,
+                          base_shape,
+                          anti_aliasing=True)
+        mask = scipy.median_filter(mask.astype(np.float32), size=2)
+
+        if self.invert:
+            base_volume = mask * base_volume + \
+                (1-mask) * (self.fill_value + scalar_offset)
+        else:
+            base_volume = (1-mask) * base_volume + \
+                mask * (self.fill_value + scalar_offset)
+
+        set_volume(base_grids, 0, base_volume)
+        vdb_path = save_vdb(base_grids, context)
+        filled_layer = deep_copy_layer(vdb_path,
+                                       base_layer,
+                                       f"{container.name}_{filled_layer_name}")
+        add_mask_node(container,
+                      filled_layer,
+                      "BioxelNodes_MaskByThreshold",
+                      filled_layer_name)
+
+        bpy.context.view_layer.objects.active = container
 
         return {'FINISHED'}
 
     def invoke(self, context, event):
         base_layer = get_layer(bpy.context.active_object)
-        self.base_layer = base_layer.name
+        scalar_min = get_layer_meta(base_layer, "Scalar Min")
+        self.fill_value = scalar_min
         context.window_manager.invoke_props_dialog(self, width=400)
         return {'RUNNING_MODAL'}
 
 
+class CombineLabels(bpy.types.Operator):
+    bl_idname = "bioxelnodes.combine_labels"
+    bl_label = "Combine Labels"
+    bl_description = "Combine all selected labels"
+    bl_options = {'UNDO'}
+
+    @classmethod
+    def poll(cls, context):
+        labels = [obj for obj in context.selected_ids
+                  if obj.get("bioxel_layer_type") == "label"]
+        return True if len(labels) > 1 else False
+
+    def execute(self, context):
+        labels = [obj for obj in context.selected_ids
+                  if obj.get("bioxel_layer_type") == "label"]
+        base_layer = labels[0]
+        labels = labels[1:]
+        container = base_layer.parent
+        label_names = [get_layer_name(base_layer)]
+        base_shape = get_shape(base_layer)
+
+        base_grids = get_grids(base_layer)
+        base_volume = get_volume(base_grids, 0, base_shape)
+
+        # Merge each remaining label into the base via an element-wise max.
+        for label in labels:
+            label_shape = get_shape(label)
+            label_grids = get_grids(label)
+            label_volume = get_volume(label_grids, 0, label_shape)
+            label_volume 
= ski.resize(label_volume, + base_shape, + anti_aliasing=True) + base_volume = np.maximum(base_volume, label_volume) + label_names.append(get_layer_name(label)) + + set_volume(base_grids, 0, base_volume) + + combined_layer_name = f"{'-'.join(label_names)}-Combined" + vdb_path = save_vdb(base_grids, context) + combined_layer = deep_copy_layer(vdb_path, + base_layer, + f"{container.name}_{combined_layer_name}") + add_mask_node(container, + combined_layer, + "BioxelNodes_MaskByLabel", + combined_layer_name) + + bpy.context.view_layer.objects.active = container + return {'FINISHED'} + + class ConvertToMesh(bpy.types.Operator): bl_idname = "bioxelnodes.convert_to_mesh" - bl_label = "Bioxel Components To Mesh" + bl_label = "Convert To Mesh" bl_description = "Convert Bioxel Components To Mesh" bl_options = {'UNDO'} @classmethod def poll(cls, context): - container = get_container(bpy.context.active_object) - return True if container else False + containers = get_container_from_selection() + return len(containers) > 0 def execute(self, context): - container = get_container(bpy.context.active_object) + containers = get_container_from_selection() - if not container: + if len(containers) == 0: self.report({"WARNING"}, "Cannot find any bioxel container.") return {'FINISHED'} + container = containers[0] + bpy.ops.mesh.primitive_cube_add( size=2, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) mesh = bpy.context.active_object mesh.name = f"Mesh_{container.name}" - bpy.ops.object.constraint_add(type='COPY_TRANSFORMS') - mesh.constraints[0].target = container + # bpy.ops.object.constraint_add(type='COPY_TRANSFORMS') + # mesh.constraints[0].target = container bpy.ops.node.new_geometry_nodes_modifier() modifier = mesh.modifiers[0] - nodes = modifier.node_group.nodes - links = modifier.node_group.links - - output_node = get_node_by_type(nodes, 'NodeGroupOutput')[0] - object_node = nodes.new("GeometryNodeObjectInfo") - realize_nodes = nodes.new("GeometryNodeRealizeInstances") - separate_node = custom_nodes.add_node(nodes, - "BioxelNodes__SeparateComponent") + node_tree = modifier.node_group - object_node.inputs[0].default_value = container - separate_node.inputs[1].default_value = 1 + output_node = get_node_by_type(node_tree.nodes, 'NodeGroupOutput')[0] + to_mesh_node = custom_nodes.add_node(node_tree.nodes, + "BioxelNodes_ToMesh") - links.new(object_node.outputs['Geometry'], separate_node.inputs[0]) - links.new(separate_node.outputs[0], realize_nodes.inputs[0]) - links.new(realize_nodes.outputs[0], output_node.inputs[0]) + to_mesh_node.inputs[0].default_value = container + node_tree.links.new(to_mesh_node.outputs[0], output_node.inputs[0]) bpy.ops.object.convert(target='MESH') - bpy.ops.constraint.apply(constraint="Copy Transforms", owner='OBJECT') + # bpy.ops.constraint.apply( + # constraint=mesh.constraints[0].name, owner='OBJECT') bpy.context.object.active_material_index = 1 bpy.ops.object.material_slot_remove() self.report({"INFO"}, f"Successfully convert to mesh") return {'FINISHED'} + + +class AddCutter(): + bl_options = {'UNDO'} + + @classmethod + def poll(cls, context): + containers = get_container_from_selection() + return len(containers) > 0 + + def execute(self, context): + containers = get_container_from_selection() + + if len(containers) == 0: + self.report({"WARNING"}, "Cannot find any bioxel container.") + return {'FINISHED'} + + container = containers[0] + + if self.object_type == "plane": + node_type = "BioxelNodes_PlaneObjectCutter" + 
bpy.ops.mesh.primitive_plane_add( + size=2, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) + elif self.object_type == "cylinder": + node_type = "BioxelNodes_CylinderObjectCutter" + bpy.ops.mesh.primitive_cylinder_add( + radius=1, depth=2, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) + bpy.context.object.rotation_euler[0] = container.rotation_euler[0] + elif self.object_type == "cube": + node_type = "BioxelNodes_CubeObjectCutter" + bpy.ops.mesh.primitive_cube_add( + size=2, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) + elif self.object_type == "sphere": + node_type = "BioxelNodes_SphereObjectCutter" + bpy.ops.mesh.primitive_ico_sphere_add( + radius=1, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) + + cutter = bpy.context.active_object + cutter.visible_camera = False + cutter.visible_diffuse = False + cutter.visible_glossy = False + cutter.visible_transmission = False + cutter.visible_volume_scatter = False + cutter.visible_shadow = False + cutter.hide_render = True + cutter.display_type = 'WIRE' + + modifier = container.modifiers[0] + node_tree = modifier.node_group + cutter_node = custom_nodes.add_node(node_tree.nodes, node_type) + cutter_node.inputs[0].default_value = cutter + + bpy.context.view_layer.objects.active = container + + return {'FINISHED'} + + +class AddPlaneCutter(bpy.types.Operator, AddCutter): + bl_idname = "bioxelnodes.add_plane_cutter" + bl_label = "Add Plane Cutter" + bl_description = "Add Plane Cutter to Container" + bl_options = {'UNDO'} + object_type = "plane" + + +class AddCylinderCutter(bpy.types.Operator, AddCutter): + bl_idname = "bioxelnodes.add_cylinder_cutter" + bl_label = "Add Cylinder Cutter" + bl_description = "Add Cylinder Cutter to Container" + bl_options = {'UNDO'} + object_type = "cylinder" + + +class AddCubeCutter(bpy.types.Operator, AddCutter): + bl_idname = "bioxelnodes.add_cube_cutter" + bl_label = "Add Cube Cutter" + bl_description = "Add Cube Cutter to Container" + bl_options = {'UNDO'} + object_type = "cube" + + +class AddSphereCutter(bpy.types.Operator, AddCutter): + bl_idname = "bioxelnodes.add_sphere_cutter" + bl_label = "Add Sphere Cutter" + bl_description = "Add Sphere Cutter to Container" + bl_options = {'UNDO'} + object_type = "sphere" diff --git a/bioxelnodes/scipy/__init__.py b/bioxelnodes/scipy/__init__.py new file mode 100644 index 0000000..299f581 --- /dev/null +++ b/bioxelnodes/scipy/__init__.py @@ -0,0 +1,2 @@ +from ._interpolation import zoom +from ._filters import gaussian_filter, median_filter diff --git a/bioxelnodes/scipy/_filters.py b/bioxelnodes/scipy/_filters.py new file mode 100644 index 0000000..3bbd8ef --- /dev/null +++ b/bioxelnodes/scipy/_filters.py @@ -0,0 +1,1849 @@ +# Copyright (C) 2003-2005 Peter J. Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from collections.abc import Iterable +import numbers +import warnings +import numpy as np +import operator + +from ._utils import normalize_axis_index +from . import _ni_support +from . import _nd_image + + + +def _invalid_origin(origin, lenw): + return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2) + + +def _complex_via_real_components(func, input, weights, output, cval, **kwargs): + """Complex convolution via a linear combination of real convolutions.""" + complex_input = input.dtype.kind == 'c' + complex_weights = weights.dtype.kind == 'c' + if complex_input and complex_weights: + # real component of the output + func(input.real, weights.real, output=output.real, + cval=np.real(cval), **kwargs) + output.real -= func(input.imag, weights.imag, output=None, + cval=np.imag(cval), **kwargs) + # imaginary component of the output + func(input.real, weights.imag, output=output.imag, + cval=np.real(cval), **kwargs) + output.imag += func(input.imag, weights.real, output=None, + cval=np.imag(cval), **kwargs) + elif complex_input: + func(input.real, weights, output=output.real, cval=np.real(cval), + **kwargs) + func(input.imag, weights, output=output.imag, cval=np.imag(cval), + **kwargs) + else: + if np.iscomplexobj(cval): + raise ValueError("Cannot provide a complex-valued cval when the " + "input is real.") + func(input, weights.real, output=output.real, cval=cval, **kwargs) + func(input, weights.imag, output=output.imag, cval=cval, **kwargs) + return output + + + +def correlate1d(input, weights, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Calculate a 1-D correlation along the given axis. + + The lines of the array along the given axis are correlated with the + given weights. + + Parameters + ---------- + %(input)s + weights : array + 1-D sequence of numbers. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + result : ndarray + Correlation result. Has the same shape as `input`. 
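+
+    See Also
+    --------
+    convolve1d : The same operation, but with the kernel flipped.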
+ + Examples + -------- + >>> from scipy.ndimage import correlate1d + >>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) + array([ 8, 26, 8, 12, 7, 28, 36, 9]) + """ + input = np.asarray(input) + weights = np.asarray(weights) + complex_input = input.dtype.kind == 'c' + complex_weights = weights.dtype.kind == 'c' + if complex_input or complex_weights: + if complex_weights: + weights = weights.conj() + weights = weights.astype(np.complex128, copy=False) + kwargs = dict(axis=axis, mode=mode, origin=origin) + output = _ni_support._get_output(output, input, complex_output=True) + return _complex_via_real_components(correlate1d, input, weights, + output, cval, **kwargs) + + output = _ni_support._get_output(output, input) + weights = np.asarray(weights, dtype=np.float64) + if weights.ndim != 1 or weights.shape[0] < 1: + raise RuntimeError('no filter weights given') + if not weights.flags.contiguous: + weights = weights.copy() + axis = normalize_axis_index(axis, input.ndim) + if _invalid_origin(origin, len(weights)): + raise ValueError('Invalid origin; origin must satisfy ' + '-(len(weights) // 2) <= origin <= ' + '(len(weights)-1) // 2') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.correlate1d(input, weights, axis, output, mode, cval, + origin) + return output + + + +def convolve1d(input, weights, axis=-1, output=None, mode="reflect", + cval=0.0, origin=0): + """Calculate a 1-D convolution along the given axis. + + The lines of the array along the given axis are convolved with the + given weights. + + Parameters + ---------- + %(input)s + weights : ndarray + 1-D sequence of numbers. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + convolve1d : ndarray + Convolved array with same shape as input + + Examples + -------- + >>> from scipy.ndimage import convolve1d + >>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3]) + array([14, 24, 4, 13, 12, 36, 27, 0]) + """ + weights = weights[::-1] + origin = -origin + if not len(weights) & 1: + origin -= 1 + weights = np.asarray(weights) + if weights.dtype.kind == 'c': + # pre-conjugate here to counteract the conjugation in correlate1d + weights = weights.conj() + return correlate1d(input, weights, axis, output, mode, cval, origin) + + +def _gaussian_kernel1d(sigma, order, radius): + """ + Computes a 1-D Gaussian convolution kernel. + """ + if order < 0: + raise ValueError('order must be non-negative') + exponent_range = np.arange(order + 1) + sigma2 = sigma * sigma + x = np.arange(-radius, radius+1) + phi_x = np.exp(-0.5 / sigma2 * x ** 2) + phi_x = phi_x / phi_x.sum() + + if order == 0: + return phi_x + else: + # f(x) = q(x) * phi(x) = q(x) * exp(p(x)) + # f'(x) = (q'(x) + q(x) * p'(x)) * phi(x) + # p'(x) = -1 / sigma ** 2 + # Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the + # coefficients of q(x) + q = np.zeros(order + 1) + q[0] = 1 + D = np.diag(exponent_range[1:], 1) # D @ q(x) = q'(x) + P = np.diag(np.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x) + Q_deriv = D + P + for _ in range(order): + q = Q_deriv.dot(q) + q = (x[:, None] ** exponent_range).dot(q) + return q * phi_x + + + +def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0, *, radius=None): + """1-D Gaussian filter. + + Parameters + ---------- + %(input)s + sigma : scalar + standard deviation for Gaussian kernel + %(axis)s + order : int, optional + An order of 0 corresponds to convolution with a Gaussian + kernel. 
A positive order corresponds to convolution with + that derivative of a Gaussian. + %(output)s + %(mode_reflect)s + %(cval)s + truncate : float, optional + Truncate the filter at this many standard deviations. + Default is 4.0. + radius : None or int, optional + Radius of the Gaussian kernel. If specified, the size of + the kernel will be ``2*radius + 1``, and `truncate` is ignored. + Default is None. + + Returns + ------- + gaussian_filter1d : ndarray + + Notes + ----- + The Gaussian kernel will have size ``2*radius + 1`` along each axis. If + `radius` is None, a default ``radius = round(truncate * sigma)`` will be + used. + + Examples + -------- + >>> from scipy.ndimage import gaussian_filter1d + >>> import numpy as np + >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1) + array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905]) + >>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4) + array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657]) + >>> import matplotlib.pyplot as plt + >>> rng = np.random.default_rng() + >>> x = rng.standard_normal(101).cumsum() + >>> y3 = gaussian_filter1d(x, 3) + >>> y6 = gaussian_filter1d(x, 6) + >>> plt.plot(x, 'k', label='original data') + >>> plt.plot(y3, '--', label='filtered, sigma=3') + >>> plt.plot(y6, ':', label='filtered, sigma=6') + >>> plt.legend() + >>> plt.grid() + >>> plt.show() + + """ + sd = float(sigma) + # make the radius of the filter equal to truncate standard deviations + lw = int(truncate * sd + 0.5) + if radius is not None: + lw = radius + if not isinstance(lw, numbers.Integral) or lw < 0: + raise ValueError('Radius must be a nonnegative integer.') + # Since we are calling correlate, not convolve, revert the kernel + weights = _gaussian_kernel1d(sigma, order, lw)[::-1] + return correlate1d(input, weights, axis, output, mode, cval, 0) + + + +def gaussian_filter(input, sigma, order=0, output=None, + mode="reflect", cval=0.0, truncate=4.0, *, radius=None, + axes=None): + """Multidimensional Gaussian filter. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + Standard deviation for Gaussian kernel. The standard + deviations of the Gaussian filter are given for each axis as a + sequence, or as a single number, in which case it is equal for + all axes. + order : int or sequence of ints, optional + The order of the filter along each axis is given as a sequence + of integers, or as a single number. An order of 0 corresponds + to convolution with a Gaussian kernel. A positive order + corresponds to convolution with that derivative of a Gaussian. + %(output)s + %(mode_multiple)s + %(cval)s + truncate : float, optional + Truncate the filter at this many standard deviations. + Default is 4.0. + radius : None or int or sequence of ints, optional + Radius of the Gaussian kernel. The radius are given for each axis + as a sequence, or as a single number, in which case it is equal + for all axes. If specified, the size of the kernel along each axis + will be ``2*radius + 1``, and `truncate` is ignored. + Default is None. + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `sigma`, `order`, `mode` and/or `radius` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + gaussian_filter : ndarray + Returned array of same shape as `input`. 
+ + Notes + ----- + The multidimensional filter is implemented as a sequence of + 1-D convolution filters. The intermediate arrays are + stored in the same data type as the output. Therefore, for output + types with a limited precision, the results may be imprecise + because intermediate results may be stored with insufficient + precision. + + The Gaussian kernel will have size ``2*radius + 1`` along each axis. If + `radius` is None, the default ``radius = round(truncate * sigma)`` will be + used. + + Examples + -------- + >>> from scipy.ndimage import gaussian_filter + >>> import numpy as np + >>> a = np.arange(50, step=2).reshape((5,5)) + >>> a + array([[ 0, 2, 4, 6, 8], + [10, 12, 14, 16, 18], + [20, 22, 24, 26, 28], + [30, 32, 34, 36, 38], + [40, 42, 44, 46, 48]]) + >>> gaussian_filter(a, sigma=1) + array([[ 4, 6, 8, 9, 11], + [10, 12, 14, 15, 17], + [20, 22, 24, 25, 27], + [29, 31, 33, 34, 36], + [35, 37, 39, 40, 42]]) + + >>> from scipy import datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = gaussian_filter(ascent, sigma=5) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = np.asarray(input) + output = _ni_support._get_output(output, input) + + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + orders = _ni_support._normalize_sequence(order, num_axes) + sigmas = _ni_support._normalize_sequence(sigma, num_axes) + modes = _ni_support._normalize_sequence(mode, num_axes) + radiuses = _ni_support._normalize_sequence(radius, num_axes) + axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii], radiuses[ii]) + for ii in range(num_axes) if sigmas[ii] > 1e-15] + if len(axes) > 0: + for axis, sigma, order, mode, radius in axes: + gaussian_filter1d(input, sigma, axis, order, output, + mode, cval, truncate, radius=radius) + input = output + else: + output[...] = input[...] + return output + + + +def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0): + """Calculate a Prewitt filter. + + Parameters + ---------- + %(input)s + %(axis)s + %(output)s + %(mode_multiple)s + %(cval)s + + Returns + ------- + prewitt : ndarray + Filtered array. Has the same shape as `input`. + + See Also + -------- + sobel: Sobel filter + + Notes + ----- + This function computes the one-dimensional Prewitt filter. + Horizontal edges are emphasised with the horizontal transform (axis=0), + vertical edges with the vertical transform (axis=1), and so on for higher + dimensions. These can be combined to give the magnitude. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> ascent = datasets.ascent() + >>> prewitt_h = ndimage.prewitt(ascent, axis=0) + >>> prewitt_v = ndimage.prewitt(ascent, axis=1) + >>> magnitude = np.sqrt(prewitt_h ** 2 + prewitt_v ** 2) + >>> magnitude *= 255 / np.max(magnitude) # Normalization + >>> fig, axes = plt.subplots(2, 2, figsize = (8, 8)) + >>> plt.gray() + >>> axes[0, 0].imshow(ascent) + >>> axes[0, 1].imshow(prewitt_h) + >>> axes[1, 0].imshow(prewitt_v) + >>> axes[1, 1].imshow(magnitude) + >>> titles = ["original", "horizontal", "vertical", "magnitude"] + >>> for i, ax in enumerate(axes.ravel()): + ... ax.set_title(titles[i]) + ... 
ax.axis("off") + >>> plt.show() + + """ + input = np.asarray(input) + axis = normalize_axis_index(axis, input.ndim) + output = _ni_support._get_output(output, input) + modes = _ni_support._normalize_sequence(mode, input.ndim) + correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) + axes = [ii for ii in range(input.ndim) if ii != axis] + for ii in axes: + correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,) + return output + + + +def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0): + """Calculate a Sobel filter. + + Parameters + ---------- + %(input)s + %(axis)s + %(output)s + %(mode_multiple)s + %(cval)s + + Returns + ------- + sobel : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + This function computes the axis-specific Sobel gradient. + The horizontal edges can be emphasised with the horizontal transform (axis=0), + the vertical edges with the vertical transform (axis=1) and so on for higher + dimensions. These can be combined to give the magnitude. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> ascent = datasets.ascent().astype('int32') + >>> sobel_h = ndimage.sobel(ascent, 0) # horizontal gradient + >>> sobel_v = ndimage.sobel(ascent, 1) # vertical gradient + >>> magnitude = np.sqrt(sobel_h**2 + sobel_v**2) + >>> magnitude *= 255.0 / np.max(magnitude) # normalization + >>> fig, axs = plt.subplots(2, 2, figsize=(8, 8)) + >>> plt.gray() # show the filtered result in grayscale + >>> axs[0, 0].imshow(ascent) + >>> axs[0, 1].imshow(sobel_h) + >>> axs[1, 0].imshow(sobel_v) + >>> axs[1, 1].imshow(magnitude) + >>> titles = ["original", "horizontal", "vertical", "magnitude"] + >>> for i, ax in enumerate(axs.ravel()): + ... ax.set_title(titles[i]) + ... ax.axis("off") + >>> plt.show() + + """ + input = np.asarray(input) + axis = normalize_axis_index(axis, input.ndim) + output = _ni_support._get_output(output, input) + modes = _ni_support._normalize_sequence(mode, input.ndim) + correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0) + axes = [ii for ii in range(input.ndim) if ii != axis] + for ii in axes: + correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0) + return output + + + +def generic_laplace(input, derivative2, output=None, mode="reflect", + cval=0.0, + extra_arguments=(), + extra_keywords=None): + """ + N-D Laplace filter using a provided second derivative function. + + Parameters + ---------- + %(input)s + derivative2 : callable + Callable with the following signature:: + + derivative2(input, axis, output, mode, cval, + *extra_arguments, **extra_keywords) + + See `extra_arguments`, `extra_keywords` below. + %(output)s + %(mode_multiple)s + %(cval)s + %(extra_keywords)s + %(extra_arguments)s + + Returns + ------- + generic_laplace : ndarray + Filtered array. Has the same shape as `input`. + + """ + if extra_keywords is None: + extra_keywords = {} + input = np.asarray(input) + output = _ni_support._get_output(output, input) + axes = list(range(input.ndim)) + if len(axes) > 0: + modes = _ni_support._normalize_sequence(mode, len(axes)) + derivative2(input, axes[0], output, modes[0], cval, + *extra_arguments, **extra_keywords) + for ii in range(1, len(axes)): + tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval, + *extra_arguments, **extra_keywords) + output += tmp + else: + output[...] = input[...] 
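+    # The per-axis second derivatives accumulated above sum to the
+    # discrete Laplacian approximation of ``input``.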
+ return output + + + +def laplace(input, output=None, mode="reflect", cval=0.0): + """N-D Laplace filter based on approximate second derivatives. + + Parameters + ---------- + %(input)s + %(output)s + %(mode_multiple)s + %(cval)s + + Returns + ------- + laplace : ndarray + Filtered array. Has the same shape as `input`. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.laplace(ascent) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + def derivative2(input, axis, output, mode, cval): + return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0) + return generic_laplace(input, derivative2, output, mode, cval) + + + +def gaussian_laplace(input, sigma, output=None, mode="reflect", + cval=0.0, **kwargs): + """Multidimensional Laplace filter using Gaussian second derivatives. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + The standard deviations of the Gaussian filter are given for + each axis as a sequence, or as a single number, in which case + it is equal for all axes. + %(output)s + %(mode_multiple)s + %(cval)s + Extra keyword arguments will be passed to gaussian_filter(). + + Returns + ------- + gaussian_laplace : ndarray + Filtered array. Has the same shape as `input`. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> ascent = datasets.ascent() + + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + + >>> result = ndimage.gaussian_laplace(ascent, sigma=1) + >>> ax1.imshow(result) + + >>> result = ndimage.gaussian_laplace(ascent, sigma=3) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = np.asarray(input) + + def derivative2(input, axis, output, mode, cval, sigma, **kwargs): + order = [0] * input.ndim + order[axis] = 2 + return gaussian_filter(input, sigma, order, output, mode, cval, + **kwargs) + + return generic_laplace(input, derivative2, output, mode, cval, + extra_arguments=(sigma,), + extra_keywords=kwargs) + + + +def generic_gradient_magnitude(input, derivative, output=None, + mode="reflect", cval=0.0, + extra_arguments=(), extra_keywords=None): + """Gradient magnitude using a provided gradient function. + + Parameters + ---------- + %(input)s + derivative : callable + Callable with the following signature:: + + derivative(input, axis, output, mode, cval, + *extra_arguments, **extra_keywords) + + See `extra_arguments`, `extra_keywords` below. + `derivative` can assume that `input` and `output` are ndarrays. + Note that the output from `derivative` is modified inplace; + be careful to copy important inputs before returning them. + %(output)s + %(mode_multiple)s + %(cval)s + %(extra_keywords)s + %(extra_arguments)s + + Returns + ------- + generic_gradient_matnitude : ndarray + Filtered array. Has the same shape as `input`. 
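+
+    Notes
+    -----
+    The result is the element-wise Euclidean norm of the per-axis
+    outputs of `derivative`, i.e. ``sqrt(sum_i d_i ** 2)``.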
+ + """ + if extra_keywords is None: + extra_keywords = {} + input = np.asarray(input) + output = _ni_support._get_output(output, input) + axes = list(range(input.ndim)) + if len(axes) > 0: + modes = _ni_support._normalize_sequence(mode, len(axes)) + derivative(input, axes[0], output, modes[0], cval, + *extra_arguments, **extra_keywords) + np.multiply(output, output, output) + for ii in range(1, len(axes)): + tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval, + *extra_arguments, **extra_keywords) + np.multiply(tmp, tmp, tmp) + output += tmp + # This allows the sqrt to work with a different default casting + np.sqrt(output, output, casting='unsafe') + else: + output[...] = input[...] + return output + + + +def gaussian_gradient_magnitude(input, sigma, output=None, + mode="reflect", cval=0.0, **kwargs): + """Multidimensional gradient magnitude using Gaussian derivatives. + + Parameters + ---------- + %(input)s + sigma : scalar or sequence of scalars + The standard deviations of the Gaussian filter are given for + each axis as a sequence, or as a single number, in which case + it is equal for all axes. + %(output)s + %(mode_multiple)s + %(cval)s + Extra keyword arguments will be passed to gaussian_filter(). + + Returns + ------- + gaussian_gradient_magnitude : ndarray + Filtered array. Has the same shape as `input`. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = np.asarray(input) + + def derivative(input, axis, output, mode, cval, sigma, **kwargs): + order = [0] * input.ndim + order[axis] = 1 + return gaussian_filter(input, sigma, order, output, mode, + cval, **kwargs) + + return generic_gradient_magnitude(input, derivative, output, mode, + cval, extra_arguments=(sigma,), + extra_keywords=kwargs) + + +def _correlate_or_convolve(input, weights, output, mode, cval, origin, + convolution): + input = np.asarray(input) + weights = np.asarray(weights) + complex_input = input.dtype.kind == 'c' + complex_weights = weights.dtype.kind == 'c' + if complex_input or complex_weights: + if complex_weights and not convolution: + # As for np.correlate, conjugate weights rather than input. 
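+            # (A true convolution must not conjugate, which is why this
+            # branch is skipped when ``convolution`` is set.)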
+ weights = weights.conj() + kwargs = dict( + mode=mode, origin=origin, convolution=convolution + ) + output = _ni_support._get_output(output, input, complex_output=True) + + return _complex_via_real_components(_correlate_or_convolve, input, + weights, output, cval, **kwargs) + + origins = _ni_support._normalize_sequence(origin, input.ndim) + weights = np.asarray(weights, dtype=np.float64) + wshape = [ii for ii in weights.shape if ii > 0] + if len(wshape) != input.ndim: + raise RuntimeError('filter weights array has incorrect shape.') + if convolution: + weights = weights[tuple([slice(None, None, -1)] * weights.ndim)] + for ii in range(len(origins)): + origins[ii] = -origins[ii] + if not weights.shape[ii] & 1: + origins[ii] -= 1 + for origin, lenw in zip(origins, wshape): + if _invalid_origin(origin, lenw): + raise ValueError('Invalid origin; origin must satisfy ' + '-(weights.shape[k] // 2) <= origin[k] <= ' + '(weights.shape[k]-1) // 2') + + if not weights.flags.contiguous: + weights = weights.copy() + output = _ni_support._get_output(output, input) + temp_needed = np.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _ni_support._get_output(output.dtype, input) + if not isinstance(mode, str) and isinstance(mode, Iterable): + raise RuntimeError("A sequence of modes is not supported") + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.correlate(input, weights, output, mode, cval, origins) + if temp_needed: + temp[...] = output + output = temp + return output + + + +def correlate(input, weights, output=None, mode='reflect', cval=0.0, + origin=0): + """ + Multidimensional correlation. + + The array is correlated with the given kernel. + + Parameters + ---------- + %(input)s + weights : ndarray + array of weights, same number of dimensions as input + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + + Returns + ------- + result : ndarray + The result of correlation of `input` with `weights`. + + See Also + -------- + convolve : Convolve an image with a kernel. + + Examples + -------- + Correlation is the process of moving a filter mask often referred to + as kernel over the image and computing the sum of products at each location. + + >>> from scipy.ndimage import correlate + >>> import numpy as np + >>> input_img = np.arange(25).reshape(5,5) + >>> print(input_img) + [[ 0 1 2 3 4] + [ 5 6 7 8 9] + [10 11 12 13 14] + [15 16 17 18 19] + [20 21 22 23 24]] + + Define a kernel (weights) for correlation. In this example, it is for sum of + center and up, down, left and right next elements. + + >>> weights = [[0, 1, 0], + ... [1, 1, 1], + ... [0, 1, 0]] + + We can calculate a correlation result: + For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``. + + >>> correlate(input_img, weights) + array([[ 6, 10, 15, 20, 24], + [ 26, 30, 35, 40, 44], + [ 51, 55, 60, 65, 69], + [ 76, 80, 85, 90, 94], + [ 96, 100, 105, 110, 114]]) + + """ + return _correlate_or_convolve(input, weights, output, mode, cval, + origin, False) + + + +def convolve(input, weights, output=None, mode='reflect', cval=0.0, + origin=0): + """ + Multidimensional convolution. + + The array is convolved with the given kernel. + + Parameters + ---------- + %(input)s + weights : array_like + Array of weights, same number of dimensions as input + %(output)s + %(mode_reflect)s + cval : scalar, optional + Value to fill past edges of input if `mode` is 'constant'. 
Default + is 0.0 + origin : int, optional + Controls the origin of the input signal, which is where the + filter is centered to produce the first element of the output. + Positive values shift the filter to the right, and negative values + shift the filter to the left. Default is 0. + + Returns + ------- + result : ndarray + The result of convolution of `input` with `weights`. + + See Also + -------- + correlate : Correlate an image with a kernel. + + Notes + ----- + Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where + W is the `weights` kernel, + j is the N-D spatial index over :math:`W`, + I is the `input` and k is the coordinate of the center of + W, specified by `origin` in the input parameters. + + Examples + -------- + Perhaps the simplest case to understand is ``mode='constant', cval=0.0``, + because in this case borders (i.e., where the `weights` kernel, centered + on any one value, extends beyond an edge of `input`) are treated as zeros. + + >>> import numpy as np + >>> a = np.array([[1, 2, 0, 0], + ... [5, 3, 0, 4], + ... [0, 0, 0, 7], + ... [9, 3, 0, 0]]) + >>> k = np.array([[1,1,1],[1,1,0],[1,0,0]]) + >>> from scipy import ndimage + >>> ndimage.convolve(a, k, mode='constant', cval=0.0) + array([[11, 10, 7, 4], + [10, 3, 11, 11], + [15, 12, 14, 7], + [12, 3, 7, 0]]) + + Setting ``cval=1.0`` is equivalent to padding the outer edge of `input` + with 1.0's (and then extracting only the original region of the result). + + >>> ndimage.convolve(a, k, mode='constant', cval=1.0) + array([[13, 11, 8, 7], + [11, 3, 11, 14], + [16, 12, 14, 10], + [15, 6, 10, 5]]) + + With ``mode='reflect'`` (the default), outer values are reflected at the + edge of `input` to fill in missing values. + + >>> b = np.array([[2, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> k = np.array([[0,1,0], [0,1,0], [0,1,0]]) + >>> ndimage.convolve(b, k, mode='reflect') + array([[5, 0, 0], + [3, 0, 0], + [1, 0, 0]]) + + This includes diagonally at the corners. + + >>> k = np.array([[1,0,0],[0,1,0],[0,0,1]]) + >>> ndimage.convolve(b, k) + array([[4, 2, 0], + [3, 2, 0], + [1, 1, 0]]) + + With ``mode='nearest'``, the single nearest value in to an edge in + `input` is repeated as many times as needed to match the overlapping + `weights`. + + >>> c = np.array([[2, 0, 1], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> k = np.array([[0, 1, 0], + ... [0, 1, 0], + ... [0, 1, 0], + ... [0, 1, 0], + ... [0, 1, 0]]) + >>> ndimage.convolve(c, k, mode='nearest') + array([[7, 0, 3], + [5, 0, 2], + [3, 0, 1]]) + + """ + return _correlate_or_convolve(input, weights, output, mode, cval, + origin, True) + + + +def uniform_filter1d(input, size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a 1-D uniform filter along the given axis. + + The lines of the array along the given axis are filtered with a + uniform filter of given size. + + Parameters + ---------- + %(input)s + size : int + length of uniform filter + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + result : ndarray + Filtered array. Has same shape as `input`. 
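+
+    Notes
+    -----
+    Each output element is the average of the ``size`` input elements
+    around it; for integer `input`, the average is truncated towards
+    zero, as in the example below.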
+ + Examples + -------- + >>> from scipy.ndimage import uniform_filter1d + >>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) + array([4, 3, 4, 1, 4, 6, 6, 3]) + """ + input = np.asarray(input) + axis = normalize_axis_index(axis, input.ndim) + if size < 1: + raise RuntimeError('incorrect filter size') + complex_output = input.dtype.kind == 'c' + output = _ni_support._get_output(output, input, + complex_output=complex_output) + if (size // 2 + origin < 0) or (size // 2 + origin >= size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + if not complex_output: + _nd_image.uniform_filter1d(input, size, axis, output, mode, cval, + origin) + else: + _nd_image.uniform_filter1d(input.real, size, axis, output.real, mode, + np.real(cval), origin) + _nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode, + np.imag(cval), origin) + return output + + + +def uniform_filter(input, size=3, output=None, mode="reflect", + cval=0.0, origin=0, *, axes=None): + """Multidimensional uniform filter. + + Parameters + ---------- + %(input)s + size : int or sequence of ints, optional + The sizes of the uniform filter are given for each axis as a + sequence, or as a single number, in which case the size is + equal for all axes. + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + uniform_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + The multidimensional filter is implemented as a sequence of + 1-D uniform filters. The intermediate arrays are stored + in the same data type as the output. Therefore, for output types + with a limited precision, the results may be imprecise because + intermediate results may be stored with insufficient precision. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.uniform_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + input = np.asarray(input) + output = _ni_support._get_output(output, input, + complex_output=input.dtype.kind == 'c') + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + sizes = _ni_support._normalize_sequence(size, num_axes) + origins = _ni_support._normalize_sequence(origin, num_axes) + modes = _ni_support._normalize_sequence(mode, num_axes) + axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) + for ii in range(num_axes) if sizes[ii] > 1] + if len(axes) > 0: + for axis, size, origin, mode in axes: + uniform_filter1d(input, int(size), axis, output, mode, + cval, origin) + input = output + else: + output[...] = input[...] + return output + + + +def minimum_filter1d(input, size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a 1-D minimum filter along the given axis. + + The lines of the array along the given axis are filtered with a + minimum filter of given size. 
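+    For a flat window this is equivalent to a grey-scale erosion with a
+    structuring element of length `size`.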
+ + Parameters + ---------- + %(input)s + size : int + length along which to calculate 1D minimum + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + result : ndarray. + Filtered image. Has the same shape as `input`. + + Notes + ----- + This function implements the MINLIST algorithm [1]_, as described by + Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being + the `input` length, regardless of filter size. + + References + ---------- + .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 + .. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html + + + Examples + -------- + >>> from scipy.ndimage import minimum_filter1d + >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) + array([2, 0, 0, 0, 1, 1, 0, 0]) + """ + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + axis = normalize_axis_index(axis, input.ndim) + if size < 1: + raise RuntimeError('incorrect filter size') + output = _ni_support._get_output(output, input) + if (size // 2 + origin < 0) or (size // 2 + origin >= size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, + origin, 1) + return output + + + +def maximum_filter1d(input, size, axis=-1, output=None, + mode="reflect", cval=0.0, origin=0): + """Calculate a 1-D maximum filter along the given axis. + + The lines of the array along the given axis are filtered with a + maximum filter of given size. + + Parameters + ---------- + %(input)s + size : int + Length along which to calculate the 1-D maximum. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + + Returns + ------- + maximum1d : ndarray, None + Maximum-filtered array with same shape as input. + None if `output` is not None + + Notes + ----- + This function implements the MAXLIST algorithm [1]_, as described by + Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being + the `input` length, regardless of filter size. + + References + ---------- + .. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777 + .. 
[2] http://www.richardhartersworld.com/cri/2001/slidingmin.html + + Examples + -------- + >>> from scipy.ndimage import maximum_filter1d + >>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) + array([8, 8, 8, 4, 9, 9, 9, 9]) + """ + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + axis = normalize_axis_index(axis, input.ndim) + if size < 1: + raise RuntimeError('incorrect filter size') + output = _ni_support._get_output(output, input) + if (size // 2 + origin < 0) or (size // 2 + origin >= size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, + origin, 0) + return output + + +def _min_or_max_filter(input, size, footprint, structure, output, mode, + cval, origin, minimum, axes=None): + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=3) + if structure is None: + if footprint is None: + if size is None: + raise RuntimeError("no footprint provided") + separable = True + else: + footprint = np.asarray(footprint, dtype=bool) + if not footprint.any(): + raise ValueError("All-zero footprint is not supported.") + if footprint.all(): + size = footprint.shape + footprint = None + separable = True + else: + separable = False + else: + structure = np.asarray(structure, dtype=np.float64) + separable = False + if footprint is None: + footprint = np.ones(structure.shape, bool) + else: + footprint = np.asarray(footprint, dtype=bool) + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError("Complex type not supported") + output = _ni_support._get_output(output, input) + temp_needed = np.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _ni_support._get_output(output.dtype, input) + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + if separable: + origins = _ni_support._normalize_sequence(origin, num_axes) + sizes = _ni_support._normalize_sequence(size, num_axes) + modes = _ni_support._normalize_sequence(mode, num_axes) + axes = [(axes[ii], sizes[ii], origins[ii], modes[ii]) + for ii in range(len(axes)) if sizes[ii] > 1] + if minimum: + filter_ = minimum_filter1d + else: + filter_ = maximum_filter1d + if len(axes) > 0: + for axis, size, origin, mode in axes: + filter_(input, int(size), axis, output, mode, cval, origin) + input = output + else: + output[...] = input[...] 
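+    # Non-separable footprint or explicit structuring element: do a single
+    # full N-D filter pass below instead of per-axis 1-D passes.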
+ else: + origins = _ni_support._normalize_sequence(origin, num_axes) + if num_axes < input.ndim: + if footprint.ndim != num_axes: + raise RuntimeError("footprint array has incorrect shape") + footprint = np.expand_dims( + footprint, + tuple(ax for ax in range(input.ndim) if ax not in axes) + ) + # set origin = 0 for any axes not being filtered + origins_temp = [0,] * input.ndim + for o, ax in zip(origins, axes): + origins_temp[ax] = o + origins = origins_temp + + fshape = [ii for ii in footprint.shape if ii > 0] + if len(fshape) != input.ndim: + raise RuntimeError('footprint array has incorrect shape.') + for origin, lenf in zip(origins, fshape): + if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): + raise ValueError("invalid origin") + if not footprint.flags.contiguous: + footprint = footprint.copy() + if structure is not None: + if len(structure.shape) != input.ndim: + raise RuntimeError("structure array has incorrect shape") + if num_axes != structure.ndim: + structure = np.expand_dims( + structure, + tuple(ax for ax in range(structure.ndim) if ax not in axes) + ) + if not structure.flags.contiguous: + structure = structure.copy() + if not isinstance(mode, str) and isinstance(mode, Iterable): + raise RuntimeError( + "A sequence of modes is not supported for non-separable " + "footprints") + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.min_or_max_filter(input, footprint, structure, output, + mode, cval, origins, minimum) + if temp_needed: + temp[...] = output + output = temp + return output + + + +def minimum_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """Calculate a multidimensional minimum filter. + + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + minimum_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + A sequence of modes (one per axis) is only supported when the footprint is + separable. Otherwise, a single mode string must be provided. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.minimum_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _min_or_max_filter(input, size, footprint, None, output, mode, + cval, origin, 1, axes) + + + +def maximum_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """Calculate a multidimensional maximum filter. + + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_multiple)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. When `axes` is + specified, any tuples used for `size`, `origin`, and/or `mode` + must match the length of `axes`. 
The ith entry in any of these tuples + corresponds to the ith entry in `axes`. + + Returns + ------- + maximum_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + A sequence of modes (one per axis) is only supported when the footprint is + separable. Otherwise, a single mode string must be provided. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.maximum_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _min_or_max_filter(input, size, footprint, None, output, mode, + cval, origin, 0, axes) + + + +def _rank_filter(input, rank, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, operation='rank', + axes=None): + if (size is not None) and (footprint is not None): + warnings.warn("ignoring size because footprint is set", + UserWarning, stacklevel=3) + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + axes = _ni_support._check_axes(axes, input.ndim) + num_axes = len(axes) + origins = _ni_support._normalize_sequence(origin, num_axes) + if footprint is None: + if size is None: + raise RuntimeError("no footprint or filter size provided") + sizes = _ni_support._normalize_sequence(size, num_axes) + footprint = np.ones(sizes, dtype=bool) + else: + footprint = np.asarray(footprint, dtype=bool) + if num_axes < input.ndim: + # set origin = 0 for any axes not being filtered + origins_temp = [0,] * input.ndim + for o, ax in zip(origins, axes): + origins_temp[ax] = o + origins = origins_temp + + if not isinstance(mode, str) and isinstance(mode, Iterable): + # set mode = 'constant' for any axes not being filtered + modes = _ni_support._normalize_sequence(mode, num_axes) + modes_temp = ['constant'] * input.ndim + for m, ax in zip(modes, axes): + modes_temp[ax] = m + mode = modes_temp + + # insert singleton dimension along any non-filtered axes + if footprint.ndim != num_axes: + raise RuntimeError("footprint array has incorrect shape") + footprint = np.expand_dims( + footprint, + tuple(ax for ax in range(input.ndim) if ax not in axes) + ) + fshape = [ii for ii in footprint.shape if ii > 0] + if len(fshape) != input.ndim: + raise RuntimeError('footprint array has incorrect shape.') + for origin, lenf in zip(origins, fshape): + if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf): + raise ValueError('invalid origin') + if not footprint.flags.contiguous: + footprint = footprint.copy() + filter_size = np.where(footprint, 1, 0).sum() + if operation == 'median': + rank = filter_size // 2 + elif operation == 'percentile': + percentile = rank + if percentile < 0.0: + percentile += 100.0 + if percentile < 0 or percentile > 100: + raise RuntimeError('invalid percentile') + if percentile == 100.0: + rank = filter_size - 1 + else: + rank = int(float(filter_size) * percentile / 100.0) + if rank < 0: + rank += filter_size + if rank < 0 or rank >= filter_size: + raise RuntimeError('rank not within filter footprint size') + if rank == 0: + return minimum_filter(input, None, footprint, output, mode, cval, + origins, axes=None) + elif rank == filter_size - 1: + return maximum_filter(input, None, footprint, output, mode, cval, + origins, axes=None) + else: + output = 
_ni_support._get_output(output, input) + temp_needed = np.may_share_memory(input, output) + if temp_needed: + # input and output arrays cannot share memory + temp = output + output = _ni_support._get_output(output.dtype, input) + if not isinstance(mode, str) and isinstance(mode, Iterable): + raise RuntimeError( + "A sequence of modes is not supported by non-separable rank " + "filters") + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.rank_filter(input, rank, footprint, output, mode, cval, + origins) + if temp_needed: + temp[...] = output + output = temp + return output + + + +def rank_filter(input, rank, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """Calculate a multidimensional rank filter. + + Parameters + ---------- + %(input)s + rank : int + The rank parameter may be less than zero, i.e., rank = -1 + indicates the largest element. + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. + + Returns + ------- + rank_filter : ndarray + Filtered array. Has the same shape as `input`. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.rank_filter(ascent, rank=42, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + rank = operator.index(rank) + return _rank_filter(input, rank, size, footprint, output, mode, cval, + origin, 'rank', axes=axes) + + + +def median_filter(input, size=None, footprint=None, output=None, + mode="reflect", cval=0.0, origin=0, *, axes=None): + """ + Calculate a multidimensional median filter. + + Parameters + ---------- + %(input)s + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. + + Returns + ------- + median_filter : ndarray + Filtered array. Has the same shape as `input`. + + See Also + -------- + scipy.signal.medfilt2d + + Notes + ----- + For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes + the specialised function `scipy.signal.medfilt2d` may be faster. It is + however limited to constant mode with ``cval=0``. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.median_filter(ascent, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _rank_filter(input, 0, size, footprint, output, mode, cval, + origin, 'median', axes=axes) + + + +def percentile_filter(input, percentile, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0, *, + axes=None): + """Calculate a multidimensional percentile filter. 
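+
+    Each output element is set to the requested percentile of the input
+    values covered by the filter footprint; ``percentile=50`` is
+    equivalent to a median filter.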
+ + Parameters + ---------- + %(input)s + percentile : scalar + The percentile parameter may be less than zero, i.e., + percentile = -20 equals percentile = 80 + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + axes : tuple of int or None, optional + If None, `input` is filtered along all axes. Otherwise, + `input` is filtered along the specified axes. + + Returns + ------- + percentile_filter : ndarray + Filtered array. Has the same shape as `input`. + + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> plt.gray() # show the filtered result in grayscale + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.percentile_filter(ascent, percentile=20, size=20) + >>> ax1.imshow(ascent) + >>> ax2.imshow(result) + >>> plt.show() + """ + return _rank_filter(input, percentile, size, footprint, output, mode, + cval, origin, 'percentile', axes=axes) + + + +def generic_filter1d(input, function, filter_size, axis=-1, + output=None, mode="reflect", cval=0.0, origin=0, + extra_arguments=(), extra_keywords=None): + """Calculate a 1-D filter along the given axis. + + `generic_filter1d` iterates over the lines of the array, calling the + given function at each line. The arguments of the line are the + input line, and the output line. The input and output lines are 1-D + double arrays. The input line is extended appropriately according + to the filter size and origin. The output line must be modified + in-place with the result. + + Parameters + ---------- + %(input)s + function : {callable, scipy.LowLevelCallable} + Function to apply along given axis. + filter_size : scalar + Length of the filter. + %(axis)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin)s + %(extra_arguments)s + %(extra_keywords)s + + Returns + ------- + generic_filter1d : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + This function also accepts low-level callback functions with one of + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. code:: c + + int function(double *input_line, npy_intp input_length, + double *output_line, npy_intp output_length, + void *user_data) + int function(double *input_line, intptr_t input_length, + double *output_line, intptr_t output_length, + void *user_data) + + The calling function iterates over the lines of the input and output + arrays, calling the callback function at each line. The current line + is extended according to the border conditions set by the calling + function, and the result is copied into the array that is passed + through ``input_line``. The length of the input line (after extension) + is passed through ``input_length``. The callback function should apply + the filter and store the result in the array passed through + ``output_line``. The length of the output line is passed through + ``output_length``. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. If an error occurs, you should + normally set the python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. 
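+
+    For example, a plain Python ``function`` (an illustrative helper, not
+    part of the SciPy API) that writes a running mean of the extended
+    input line into the output line might look like:
+
+    >>> import numpy as np
+    >>> from scipy import ndimage
+    >>> def running_mean(input_line, output_line):
+    ...     # input_line is already extended by the boundary mode; window i
+    ...     # covers input_line[i:i + filter_size] when origin is 0
+    ...     for i in range(output_line.size):
+    ...         output_line[i] = input_line[i:i + 3].mean()
+    >>> x = np.arange(12, dtype=np.float64).reshape(3, 4)
+    >>> smoothed = ndimage.generic_filter1d(x, running_mean, filter_size=3)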
+ + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. + + """ + if extra_keywords is None: + extra_keywords = {} + input = np.asarray(input) + if np.iscomplexobj(input): + raise TypeError('Complex type not supported') + output = _ni_support._get_output(output, input) + if filter_size < 1: + raise RuntimeError('invalid filter size') + axis = normalize_axis_index(axis, input.ndim) + if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >= + filter_size): + raise ValueError('invalid origin') + mode = _ni_support._extend_mode_to_code(mode) + _nd_image.generic_filter1d(input, function, filter_size, axis, output, + mode, cval, origin, extra_arguments, + extra_keywords) + return output + + + +def generic_filter(input, function, size=None, footprint=None, + output=None, mode="reflect", cval=0.0, origin=0, + extra_arguments=(), extra_keywords=None): + """Calculate a multidimensional filter using the given function. + + At each element the provided function is called. The input values + within the filter footprint at that element are passed to the function + as a 1-D array of double values. + + Parameters + ---------- + %(input)s + function : {callable, scipy.LowLevelCallable} + Function to apply at each element. + %(size_foot)s + %(output)s + %(mode_reflect)s + %(cval)s + %(origin_multiple)s + %(extra_arguments)s + %(extra_keywords)s + + Returns + ------- + generic_filter : ndarray + Filtered array. Has the same shape as `input`. + + Notes + ----- + This function also accepts low-level callback functions with one of + the following signatures and wrapped in `scipy.LowLevelCallable`: + + .. code:: c + + int callback(double *buffer, npy_intp filter_size, + double *return_value, void *user_data) + int callback(double *buffer, intptr_t filter_size, + double *return_value, void *user_data) + + The calling function iterates over the elements of the input and + output arrays, calling the callback function at each element. The + elements within the footprint of the filter at the current element are + passed through the ``buffer`` parameter, and the number of elements + within the footprint through ``filter_size``. The calculated value is + returned in ``return_value``. ``user_data`` is the data pointer provided + to `scipy.LowLevelCallable` as-is. + + The callback function must return an integer error status that is zero + if something went wrong and one otherwise. If an error occurs, you should + normally set the python error status with an informative message + before returning, otherwise a default error message is set by the + calling function. + + In addition, some other low-level function pointer specifications + are accepted, but these are for backward compatibility only and should + not be used in new code. + + Examples + -------- + Import the necessary modules and load the example image used for + filtering. + + >>> import numpy as np + >>> from scipy import datasets + >>> from scipy.ndimage import zoom, generic_filter + >>> import matplotlib.pyplot as plt + >>> ascent = zoom(datasets.ascent(), 0.5) + + Compute a maximum filter with kernel size 5 by passing a simple NumPy + aggregation function as argument to `function`. 
+
+    >>> maximum_filter_result = generic_filter(ascent, np.amax, [5, 5])
+
+    While a maximum filter could also directly be obtained using
+    `maximum_filter`, `generic_filter` allows a generic Python function or
+    `scipy.LowLevelCallable` to be used as a filter. Here, we compute the
+    range between maximum and minimum value as an example for a kernel size
+    of 5.
+
+    >>> def custom_filter(image):
+    ...     return np.amax(image) - np.amin(image)
+    >>> custom_filter_result = generic_filter(ascent, custom_filter, [5, 5])
+
+    Plot the original and filtered images.
+
+    >>> fig, axes = plt.subplots(3, 1, figsize=(3, 9))
+    >>> plt.gray()  # show the filtered result in grayscale
+    >>> top, middle, bottom = axes
+    >>> for ax in axes:
+    ...     ax.set_axis_off()  # remove coordinate system
+    >>> top.imshow(ascent)
+    >>> top.set_title("Original image")
+    >>> middle.imshow(maximum_filter_result)
+    >>> middle.set_title("Maximum filter, Kernel: 5x5")
+    >>> bottom.imshow(custom_filter_result)
+    >>> bottom.set_title("Custom filter, Kernel: 5x5")
+    >>> fig.tight_layout()
+
+    """
+    if (size is not None) and (footprint is not None):
+        warnings.warn("ignoring size because footprint is set",
+                      UserWarning, stacklevel=2)
+    if extra_keywords is None:
+        extra_keywords = {}
+    input = np.asarray(input)
+    if np.iscomplexobj(input):
+        raise TypeError('Complex type not supported')
+    origins = _ni_support._normalize_sequence(origin, input.ndim)
+    if footprint is None:
+        if size is None:
+            raise RuntimeError("no footprint or filter size provided")
+        sizes = _ni_support._normalize_sequence(size, input.ndim)
+        footprint = np.ones(sizes, dtype=bool)
+    else:
+        footprint = np.asarray(footprint, dtype=bool)
+    fshape = [ii for ii in footprint.shape if ii > 0]
+    if len(fshape) != input.ndim:
+        raise RuntimeError('filter footprint array has incorrect shape.')
+    for origin, lenf in zip(origins, fshape):
+        if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
+            raise ValueError('invalid origin')
+    if not footprint.flags.contiguous:
+        footprint = footprint.copy()
+    output = _ni_support._get_output(output, input)
+    mode = _ni_support._extend_mode_to_code(mode)
+    _nd_image.generic_filter(input, function, footprint, output, mode,
+                             cval, origins, extra_arguments, extra_keywords)
+    return output
\ No newline at end of file
diff --git a/bioxelnodes/scipy/_interpolation.py b/bioxelnodes/scipy/_interpolation.py
new file mode 100644
index 0000000..0384130
--- /dev/null
+++ b/bioxelnodes/scipy/_interpolation.py
@@ -0,0 +1,313 @@
+import numpy as np
+import warnings
+
+from . import _ni_support
+from . import _nd_image
+from ._utils import normalize_axis_index
+
+
+def _prepad_for_spline_filter(input, mode, cval):
+    if mode in ['nearest', 'grid-constant']:
+        npad = 12
+        if mode == 'grid-constant':
+            padded = np.pad(input, npad, mode='constant',
+                            constant_values=cval)
+        elif mode == 'nearest':
+            padded = np.pad(input, npad, mode='edge')
+    else:
+        # other modes have exact boundary conditions implemented so
+        # no prepadding is needed
+        npad = 0
+        padded = input
+    return padded, npad
+
+def spline_filter1d(input, order=3, axis=-1, output=np.float64,
+                    mode='mirror'):
+    """
+    Calculate a 1-D spline filter along the given axis.
+
+    The lines of the array along the given axis are filtered by a
+    spline filter. The order of the spline must be >= 2 and <= 5.
+
+    Parameters
+    ----------
+    %(input)s
+    order : int, optional
+        The order of the spline, default is 3.
+    axis : int, optional
+        The axis along which the spline filter is applied.
Default is the last + axis. + output : ndarray or dtype, optional + The array in which to place the output, or the dtype of the returned + array. Default is ``numpy.float64``. + %(mode_interp_mirror)s + + Returns + ------- + spline_filter1d : ndarray + The filtered input. + + See Also + -------- + spline_filter : Multidimensional spline filter. + + Notes + ----- + All of the interpolation functions in `ndimage` do spline interpolation of + the input image. If using B-splines of `order > 1`, the input image + values have to be converted to B-spline coefficients first, which is + done by applying this 1-D filter sequentially along all + axes of the input. All functions that require B-spline coefficients + will automatically filter their inputs, a behavior controllable with + the `prefilter` keyword argument. For functions that accept a `mode` + parameter, the result will only be correct if it matches the `mode` + used when filtering. + + For complex-valued `input`, this function processes the real and imaginary + components independently. + + .. versionadded:: 1.6.0 + Complex-valued support added. + + Examples + -------- + We can filter an image using 1-D spline along the given axis: + + >>> from scipy.ndimage import spline_filter1d + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> orig_img = np.eye(20) # create an image + >>> orig_img[10, :] = 1.0 + >>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0) + >>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1) + >>> f, ax = plt.subplots(1, 3, sharex=True) + >>> for ind, data in enumerate([[orig_img, "original image"], + ... [sp_filter_axis_0, "spline filter (axis=0)"], + ... [sp_filter_axis_1, "spline filter (axis=1)"]]): + ... ax[ind].imshow(data[0], cmap='gray_r') + ... ax[ind].set_title(data[1]) + >>> plt.tight_layout() + >>> plt.show() + + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, + complex_output=complex_output) + if complex_output: + spline_filter1d(input.real, order, axis, output.real, mode) + spline_filter1d(input.imag, order, axis, output.imag, mode) + return output + if order in [0, 1]: + output[...] = np.array(input) + else: + mode = _ni_support._extend_mode_to_code(mode) + axis = normalize_axis_index(axis, input.ndim) + _nd_image.spline_filter1d(input, order, axis, output, mode) + return output + + +def spline_filter(input, order=3, output=np.float64, mode='mirror'): + """ + Multidimensional spline filter. + + Parameters + ---------- + %(input)s + order : int, optional + The order of the spline, default is 3. + output : ndarray or dtype, optional + The array in which to place the output, or the dtype of the returned + array. Default is ``numpy.float64``. + %(mode_interp_mirror)s + + Returns + ------- + spline_filter : ndarray + Filtered array. Has the same shape as `input`. + + See Also + -------- + spline_filter1d : Calculate a 1-D spline filter along the given axis. + + Notes + ----- + The multidimensional filter is implemented as a sequence of + 1-D spline filters. The intermediate arrays are stored + in the same data type as the output. Therefore, for output types + with a limited precision, the results may be imprecise because + intermediate results may be stored with insufficient precision. + + For complex-valued `input`, this function processes the real and imaginary + components independently. + + .. 
versionadded:: 1.6.0
+        Complex-valued support added.
+
+    Examples
+    --------
+    We can filter an image using multidimensional splines:
+
+    >>> from scipy.ndimage import spline_filter
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> orig_img = np.eye(20)  # create an image
+    >>> orig_img[10, :] = 1.0
+    >>> sp_filter = spline_filter(orig_img, order=3)
+    >>> f, ax = plt.subplots(1, 2, sharex=True)
+    >>> for ind, data in enumerate([[orig_img, "original image"],
+    ...                             [sp_filter, "spline filter"]]):
+    ...     ax[ind].imshow(data[0], cmap='gray_r')
+    ...     ax[ind].set_title(data[1])
+    >>> plt.tight_layout()
+    >>> plt.show()
+
+    """
+    if order < 2 or order > 5:
+        raise RuntimeError('spline order not supported')
+    input = np.asarray(input)
+    complex_output = np.iscomplexobj(input)
+    output = _ni_support._get_output(output, input,
+                                     complex_output=complex_output)
+    if complex_output:
+        spline_filter(input.real, order, output.real, mode)
+        spline_filter(input.imag, order, output.imag, mode)
+        return output
+    if order not in [0, 1] and input.ndim > 0:
+        for axis in range(input.ndim):
+            spline_filter1d(input, order, axis, output=output, mode=mode)
+            input = output
+    else:
+        output[...] = input[...]
+    return output
+
+
+def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
+         prefilter=True, *, grid_mode=False):
+    """
+    Zoom an array.
+
+    The array is zoomed using spline interpolation of the requested order.
+
+    Parameters
+    ----------
+    %(input)s
+    zoom : float or sequence
+        The zoom factor along the axes. If a float, `zoom` is the same for each
+        axis. If a sequence, `zoom` should contain one value for each axis.
+    %(output)s
+    order : int, optional
+        The order of the spline interpolation, default is 3.
+        The order has to be in the range 0-5.
+    %(mode_interp_constant)s
+    %(cval)s
+    %(prefilter)s
+    grid_mode : bool, optional
+        If False, the distance from the pixel centers is zoomed. Otherwise, the
+        distance including the full pixel extent is used. For example, a 1d
+        signal of length 5 is considered to have length 4 when `grid_mode` is
+        False, but length 5 when `grid_mode` is True. See the following
+        visual illustration:
+
+        .. code-block:: text
+
+                | pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
+                     |<-------------------------------------->|
+                     vs.
+                |<----------------------------------------------->|
+
+        The starting point of the arrow in the diagram above corresponds to
+        coordinate location 0 in each mode.
+
+    Returns
+    -------
+    zoom : ndarray
+        The zoomed input.
+
+    Notes
+    -----
+    For complex-valued `input`, this function zooms the real and imaginary
+    components independently.
+
+    .. versionadded:: 1.6.0
+        Complex-valued support added.
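+
+    The output shape is computed as ``round(input.shape[i] * zoom[i])``
+    along each axis, so non-integer zoom factors may round the output
+    size up or down.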
+ + Examples + -------- + >>> from scipy import ndimage, datasets + >>> import matplotlib.pyplot as plt + + >>> fig = plt.figure() + >>> ax1 = fig.add_subplot(121) # left side + >>> ax2 = fig.add_subplot(122) # right side + >>> ascent = datasets.ascent() + >>> result = ndimage.zoom(ascent, 3.0) + >>> ax1.imshow(ascent, vmin=0, vmax=255) + >>> ax2.imshow(result, vmin=0, vmax=255) + >>> plt.show() + + >>> print(ascent.shape) + (512, 512) + + >>> print(result.shape) + (1536, 1536) + """ + if order < 0 or order > 5: + raise RuntimeError('spline order not supported') + input = np.asarray(input) + if input.ndim < 1: + raise RuntimeError('input and output rank must be > 0') + zoom = _ni_support._normalize_sequence(zoom, input.ndim) + output_shape = tuple( + [int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)]) + complex_output = np.iscomplexobj(input) + output = _ni_support._get_output(output, input, shape=output_shape, + complex_output=complex_output) + if complex_output: + # import under different name to avoid confusion with zoom parameter + from scipy.ndimage._interpolation import zoom as _zoom + + kwargs = dict(order=order, mode=mode, prefilter=prefilter) + _zoom(input.real, zoom, output=output.real, + cval=np.real(cval), **kwargs) + _zoom(input.imag, zoom, output=output.imag, + cval=np.imag(cval), **kwargs) + return output + if prefilter and order > 1: + padded, npad = _prepad_for_spline_filter(input, mode, cval) + filtered = spline_filter(padded, order, output=np.float64, mode=mode) + else: + npad = 0 + filtered = input + if grid_mode: + # warn about modes that may have surprising behavior + suggest_mode = None + if mode == 'constant': + suggest_mode = 'grid-constant' + elif mode == 'wrap': + suggest_mode = 'grid-wrap' + if suggest_mode is not None: + warnings.warn( + (f"It is recommended to use mode = {suggest_mode} instead of {mode} " + f"when grid_mode is True."), + stacklevel=2 + ) + mode = _ni_support._extend_mode_to_code(mode) + + zoom_div = np.array(output_shape) + zoom_nominator = np.array(input.shape) + if not grid_mode: + zoom_div -= 1 + zoom_nominator -= 1 + + # Zooming to infinite values is unpredictable, so just choose + # zoom factor 1 instead + zoom = np.divide(zoom_nominator, zoom_div, + out=np.ones_like(input.shape, dtype=np.float64), + where=zoom_div != 0) + zoom = np.ascontiguousarray(zoom) + _nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad, + grid_mode) + return output diff --git a/bioxelnodes/scipy/_nd_image.cp311-win_amd64.dll.a b/bioxelnodes/scipy/_nd_image.cp311-win_amd64.dll.a new file mode 100644 index 0000000000000000000000000000000000000000..6f540f09494b99d084ffb1686fc8933d5f2b0a83 GIT binary patch literal 1568 zcma)6F>lmR40dx=pi>8i!u*h^gsKu=?vflxh`ka+|G~ScInf*LIH8lEf1ndX2Sx^d z2tqe9@&gzd7#Z2%vz<7(rmbShezu?Up8febk6sq*kkSj;G#&5API z8c8ST$IJP;38H{wSE1uBJt&>TZ>vIX%0Ew2$$IRy9_OaR@?wDWiN1ol5Tad*=Vh@j z9;aeL+yj5C4O{)Dipy#j$9<{|TP2}q9=V$<)oQg;wIX@~zuk39&lKmZKh9Zh@1(~08}8Z#Gul=DBQYWN!Hn}4Fq7tL z_!-W+dsSMT_3dhe+FhLMI7Ms^CgU@p`;c3>N~T+$F4A&)7is>A&nH1!6o`mkL-v`4 z{{6{&_a|%3ae1L#Ns%bOW9iR&_F*l$3ikg5?{P68UL5&p)kk~ZjNUKs!&V%T^~d_i z=l$k0u8*@ma@-&A;q`GjwuJT}|5kdl_|3lqwk|IlU!(1G|1#1i?v+l-U1-hJw6(Z} z52t4HCrE6~pVX|cqCK_x&MJ@8!Ch$XfFUUB%Xz>ii1*cVB({o(PM~ytjyL=>`@FgT DK?(Yu literal 0 HcmV?d00001 diff --git a/bioxelnodes/scipy/_ni_support.py b/bioxelnodes/scipy/_ni_support.py new file mode 100644 index 0000000..ae8875f --- /dev/null +++ b/bioxelnodes/scipy/_ni_support.py @@ -0,0 +1,119 @@ +# Copyright (C) 2003-2005 Peter J. 
Verveer +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. The name of the author may not be used to endorse or promote +# products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from collections.abc import Iterable +import operator +import warnings +import numpy as np + + +def _extend_mode_to_code(mode): + """Convert an extension mode to the corresponding integer code. + """ + if mode == 'nearest': + return 0 + elif mode == 'wrap': + return 1 + elif mode in ['reflect', 'grid-mirror']: + return 2 + elif mode == 'mirror': + return 3 + elif mode == 'constant': + return 4 + elif mode == 'grid-wrap': + return 5 + elif mode == 'grid-constant': + return 6 + else: + raise RuntimeError('boundary mode not supported') + + +def _normalize_sequence(input, rank): + """If input is a scalar, create a sequence of length equal to the + rank by duplicating the input. If input is a sequence, + check if its length is equal to the length of array. 
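+    A RuntimeError is raised if the lengths do not match.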
+ """ + is_str = isinstance(input, str) + if not is_str and isinstance(input, Iterable): + normalized = list(input) + if len(normalized) != rank: + err = "sequence argument must have length equal to input rank" + raise RuntimeError(err) + else: + normalized = [input] * rank + return normalized + + +def _get_output(output, input, shape=None, complex_output=False): + if shape is None: + shape = input.shape + if output is None: + if not complex_output: + output = np.zeros(shape, dtype=input.dtype.name) + else: + complex_type = np.promote_types(input.dtype, np.complex64) + output = np.zeros(shape, dtype=complex_type) + elif isinstance(output, (type, np.dtype)): + # Classes (like `np.float32`) and dtypes are interpreted as dtype + if complex_output and np.dtype(output).kind != 'c': + warnings.warn("promoting specified output dtype to complex", stacklevel=3) + output = np.promote_types(output, np.complex64) + output = np.zeros(shape, dtype=output) + elif isinstance(output, str): + output = np.dtype(output) + if complex_output and output.kind != 'c': + raise RuntimeError("output must have complex dtype") + elif not issubclass(output.type, np.number): + raise RuntimeError("output must have numeric dtype") + output = np.zeros(shape, dtype=output) + elif output.shape != shape: + raise RuntimeError("output shape not correct") + elif complex_output and output.dtype.kind != 'c': + raise RuntimeError("output must have complex dtype") + return output + + +def _check_axes(axes, ndim): + if axes is None: + return tuple(range(ndim)) + elif np.isscalar(axes): + axes = (operator.index(axes),) + elif isinstance(axes, Iterable): + for ax in axes: + axes = tuple(operator.index(ax) for ax in axes) + if ax < -ndim or ax > ndim - 1: + raise ValueError(f"specified axis: {ax} is out of range") + axes = tuple(ax % ndim if ax < 0 else ax for ax in axes) + else: + message = "axes must be an integer, iterable of integers, or None" + raise ValueError(message) + if len(tuple(set(axes))) != len(axes): + raise ValueError("axes must be unique") + return axes diff --git a/bioxelnodes/scipy/_utils.py b/bioxelnodes/scipy/_utils.py new file mode 100644 index 0000000..c397ef4 --- /dev/null +++ b/bioxelnodes/scipy/_utils.py @@ -0,0 +1,10 @@ + +def normalize_axis_index(axis, ndim): + # Check if `axis` is in the correct range and normalize it + if axis < -ndim or axis >= ndim: + msg = f"axis {axis} is out of bounds for array of dimension {ndim}" + raise Exception(msg) + + if axis < 0: + axis = axis + ndim + return axis \ No newline at end of file diff --git a/bioxelnodes/skimage/__init__.py b/bioxelnodes/skimage/__init__.py new file mode 100644 index 0000000..79b5d74 --- /dev/null +++ b/bioxelnodes/skimage/__init__.py @@ -0,0 +1 @@ +from ._warps import resize diff --git a/bioxelnodes/skimage/_utils.py b/bioxelnodes/skimage/_utils.py new file mode 100644 index 0000000..f07a428 --- /dev/null +++ b/bioxelnodes/skimage/_utils.py @@ -0,0 +1,37 @@ +import numpy as np + + +def convert_to_float(image, preserve_range): + """Convert input image to float image with the appropriate range. + + Parameters + ---------- + image : ndarray + Input image. + preserve_range : bool + Determines if the range of the image should be kept or transformed + using img_as_float. Also see + https://scikit-image.org/docs/dev/user_guide/data_types.html + + Notes + ----- + * Input images with `float32` data type are not upcast. + + Returns + ------- + image : ndarray + Transformed version of the input. 
+ + """ + if image.dtype == np.float16: + return image.astype(np.float32) + if preserve_range: + # Convert image to double only if it is not single or double + # precision float + if image.dtype.char not in 'df': + image = image.astype(float) + else: + from .dtype import img_as_float + + image = img_as_float(image) + return image diff --git a/bioxelnodes/skimage/_warps.py b/bioxelnodes/skimage/_warps.py new file mode 100644 index 0000000..992e38b --- /dev/null +++ b/bioxelnodes/skimage/_warps.py @@ -0,0 +1,318 @@ +import warnings +import numpy as np +from .. import scipy as ndi +from ._utils import convert_to_float + +def _clip_warp_output(input_image, output_image, mode, cval, clip): + """Clip output image to range of values of input image. + + Note that this function modifies the values of `output_image` in-place + and it is only modified if ``clip=True``. + + Parameters + ---------- + input_image : ndarray + Input image. + output_image : ndarray + Output image, which is modified in-place. + + Other parameters + ---------------- + mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'} + Points outside the boundaries of the input are filled according + to the given mode. Modes match the behaviour of `numpy.pad`. + cval : float + Used in conjunction with mode 'constant', the value outside + the image boundaries. + clip : bool + Whether to clip the output to the range of values of the input image. + This is enabled by default, since higher order interpolation may + produce values outside the given input range. + + """ + if clip: + min_val = np.min(input_image) + if np.isnan(min_val): + # NaNs detected, use NaN-safe min/max + min_func = np.nanmin + max_func = np.nanmax + min_val = min_func(input_image) + else: + min_func = np.min + max_func = np.max + max_val = max_func(input_image) + + # Check if cval has been used such that it expands the effective input + # range + preserve_cval = ( + mode == 'constant' + and not min_val <= cval <= max_val + and min_func(output_image) <= cval <= max_func(output_image) + ) + + # expand min/max range to account for cval + if preserve_cval: + # cast cval to the same dtype as the input image + cval = input_image.dtype.type(cval) + min_val = min(min_val, cval) + max_val = max(max_val, cval) + + # Convert array-like types to ndarrays (gh-7159) + min_val, max_val = np.asarray(min_val), np.asarray(max_val) + np.clip(output_image, min_val, max_val, out=output_image) + + +def _validate_interpolation_order(image_dtype, order): + """Validate and return spline interpolation's order. + + Parameters + ---------- + image_dtype : dtype + Image dtype. + order : int, optional + The order of the spline interpolation. The order has to be in + the range 0-5. See `skimage.transform.warp` for detail. + + Returns + ------- + order : int + if input order is None, returns 0 if image_dtype is bool and 1 + otherwise. Otherwise, image_dtype is checked and input order + is validated accordingly (order > 0 is not supported for bool + image dtype) + + """ + + if order is None: + return 0 if image_dtype == bool else 1 + + if order < 0 or order > 5: + raise ValueError( + "Spline interpolation order has to be in the " "range 0-5.") + + if image_dtype == bool and order != 0: + raise ValueError( + "Input image dtype is bool. Interpolation is not defined " + "with bool data type. Please set order to 0 or explicitly " + "cast input image to another data type." 
+        )
+
+    return order
+
+
+def _preprocess_resize_output_shape(image, output_shape):
+    """Validate resize output shape according to input image.
+
+    Parameters
+    ----------
+    image: ndarray
+        Image to be resized.
+    output_shape: iterable
+        Size of the generated output image `(rows, cols[, ...][, dim])`. If
+        `dim` is not provided, the number of channels is preserved.
+
+    Returns
+    -------
+    image: ndarray
+        The input image, but with additional singleton dimensions appended in
+        the case where ``len(output_shape) > input.ndim``.
+    output_shape: tuple
+        The output image converted to tuple.
+
+    Raises
+    ------
+    ValueError:
+        If output_shape length is smaller than the image number of
+        dimensions.
+
+    Notes
+    -----
+    The input image is reshaped if its number of dimensions is not
+    equal to output_shape_length.
+
+    """
+    output_shape = tuple(output_shape)
+    output_ndim = len(output_shape)
+    input_shape = image.shape
+    if output_ndim > image.ndim:
+        # append dimensions to input_shape
+        input_shape += (1,) * (output_ndim - image.ndim)
+        image = np.reshape(image, input_shape)
+    elif output_ndim == image.ndim - 1:
+        # multichannel case: append shape of last axis
+        output_shape = output_shape + (image.shape[-1],)
+    elif output_ndim < image.ndim:
+        raise ValueError(
+            "output_shape length cannot be smaller than the "
+            "image number of dimensions"
+        )
+
+    return image, output_shape
+
+
+def _to_ndimage_mode(mode):
+    """Convert from `numpy.pad` mode name to the corresponding ndimage mode."""
+    mode_translation_dict = dict(
+        constant='constant',
+        edge='nearest',
+        symmetric='reflect',
+        reflect='mirror',
+        wrap='wrap',
+    )
+    if mode not in mode_translation_dict:
+        raise ValueError(
+            f"Unknown mode: '{mode}', or cannot translate mode. The "
+            f"mode should be one of 'constant', 'edge', 'symmetric', "
+            f"'reflect', or 'wrap'. See the documentation of numpy.pad for "
+            f"more info."
+        )
+    return _fix_ndimage_mode(mode_translation_dict[mode])
+
+
+def _fix_ndimage_mode(mode):
+    # SciPy 1.6.0 introduced grid variants of constant and wrap which
+    # have less surprising behavior for images. Use these when available
+    grid_modes = {'constant': 'grid-constant', 'wrap': 'grid-wrap'}
+    return grid_modes.get(mode, mode)
+
+
+def resize(
+    image,
+    output_shape,
+    order=None,
+    mode='reflect',
+    cval=0,
+    clip=True,
+    preserve_range=False,
+    anti_aliasing=None,
+    anti_aliasing_sigma=None,
+):
+    """Resize image to match a certain size.
+
+    Performs interpolation to up-size or down-size N-dimensional images. Note
+    that anti-aliasing should be enabled when down-sizing images to avoid
+    aliasing artifacts. For downsampling with an integer factor also see
+    `skimage.transform.downscale_local_mean`.
+
+    Parameters
+    ----------
+    image : ndarray
+        Input image.
+    output_shape : iterable
+        Size of the generated output image `(rows, cols[, ...][, dim])`. If
+        `dim` is not provided, the number of channels is preserved. In case
+        the number of input channels does not equal the number of output
+        channels, an n-dimensional interpolation is applied.
+
+    Returns
+    -------
+    resized : ndarray
+        Resized version of the input.
+
+    Other parameters
+    ----------------
+    order : int, optional
+        The order of the spline interpolation, default is 0 if
+        image.dtype is bool and 1 otherwise. The order has to be in
+        the range 0-5. See `skimage.transform.warp` for detail.
+    mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional
+        Points outside the boundaries of the input are filled according
+        to the given mode.
Modes match the behaviour of `numpy.pad`. + cval : float, optional + Used in conjunction with mode 'constant', the value outside + the image boundaries. + clip : bool, optional + Whether to clip the output to the range of values of the input image. + This is enabled by default, since higher order interpolation may + produce values outside the given input range. + preserve_range : bool, optional + Whether to keep the original range of values. Otherwise, the input + image is converted according to the conventions of `img_as_float`. + Also see https://scikit-image.org/docs/dev/user_guide/data_types.html + anti_aliasing : bool, optional + Whether to apply a Gaussian filter to smooth the image prior + to downsampling. It is crucial to filter when downsampling + the image to avoid aliasing artifacts. If not specified, it is set to + True when downsampling an image whose data type is not bool. + It is also set to False when using nearest neighbor interpolation + (``order`` == 0) with integer input data type. + anti_aliasing_sigma : {float, tuple of floats}, optional + Standard deviation for Gaussian filtering used when anti-aliasing. + By default, this value is chosen as (s - 1) / 2 where s is the + downsampling factor, where s > 1. For the up-size case, s < 1, no + anti-aliasing is performed prior to rescaling. + + Notes + ----- + Modes 'reflect' and 'symmetric' are similar, but differ in whether the edge + pixels are duplicated during the reflection. As an example, if an array + has values [0, 1, 2] and was padded to the right by four values using + symmetric, the result would be [0, 1, 2, 2, 1, 0, 0], while for reflect it + would be [0, 1, 2, 1, 0, 1, 2]. + + Examples + -------- + >>> from skimage import data + >>> from skimage.transform import resize + >>> image = data.camera() + >>> resize(image, (100, 100)).shape + (100, 100) + + """ + + image, output_shape = _preprocess_resize_output_shape(image, output_shape) + input_shape = image.shape + input_type = image.dtype + + if input_type == np.float16: + image = image.astype(np.float32) + + if anti_aliasing is None: + anti_aliasing = ( + not input_type == bool + and not (np.issubdtype(input_type, np.integer) and order == 0) + and any(x < y for x, y in zip(output_shape, input_shape)) + ) + + if input_type == bool and anti_aliasing: + raise ValueError("anti_aliasing must be False for boolean images") + + factors = np.divide(input_shape, output_shape) + order = _validate_interpolation_order(input_type, order) + if order > 0: + image = convert_to_float(image, preserve_range) + + # Translate modes used by np.pad to those used by scipy.ndimage + ndi_mode = _to_ndimage_mode(mode) + if anti_aliasing: + if anti_aliasing_sigma is None: + anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2) + else: + anti_aliasing_sigma = np.atleast_1d(anti_aliasing_sigma) * np.ones_like( + factors + ) + if np.any(anti_aliasing_sigma < 0): + raise ValueError( + "Anti-aliasing standard deviation must be " + "greater than or equal to zero" + ) + elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)): + warnings.warn( + "Anti-aliasing standard deviation greater than zero but " + "not down-sampling along all axes", + stacklevel=2 + ) + filtered = ndi.gaussian_filter( + image, anti_aliasing_sigma, cval=cval, mode=ndi_mode + ) + else: + filtered = image + + zoom_factors = [1 / f for f in factors] + out = ndi.zoom( + filtered, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True + ) + + _clip_warp_output(image, out, mode, cval, clip) + + return out diff --git 
a/bioxelnodes/skimage/dtype.py b/bioxelnodes/skimage/dtype.py new file mode 100644 index 0000000..0b69b7b --- /dev/null +++ b/bioxelnodes/skimage/dtype.py @@ -0,0 +1,600 @@ +import warnings +from warnings import warn + +import numpy as np + + +__all__ = [ + 'img_as_float32', + 'img_as_float64', + 'img_as_float', + 'img_as_int', + 'img_as_uint', + 'img_as_ubyte', + 'img_as_bool', + 'dtype_limits', +] + +# Some of these may or may not be aliases depending on architecture & platform +_integer_types = ( + np.int8, + np.byte, + np.int16, + np.short, + np.int32, + np.int64, + np.longlong, + np.int_, + np.intp, + np.intc, + int, + np.uint8, + np.ubyte, + np.uint16, + np.ushort, + np.uint32, + np.uint64, + np.ulonglong, + np.uint, + np.uintp, + np.uintc, +) +_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types} +dtype_range = { + bool: (False, True), + np.bool_: (False, True), + float: (-1, 1), + np.float16: (-1, 1), + np.float32: (-1, 1), + np.float64: (-1, 1), +} + +with warnings.catch_warnings(): + warnings.filterwarnings('ignore', category=DeprecationWarning) + + # np.bool8 is a deprecated alias of np.bool_ + if hasattr(np, 'bool8'): + dtype_range[np.bool8] = (False, True) + +dtype_range.update(_integer_ranges) + +_supported_types = list(dtype_range.keys()) + + +def dtype_limits(image, clip_negative=False): + """Return intensity limits, i.e. (min, max) tuple, of the image's dtype. + + Parameters + ---------- + image : ndarray + Input image. + clip_negative : bool, optional + If True, clip the negative range (i.e. return 0 for min intensity) + even if the image dtype allows negative values. + + Returns + ------- + imin, imax : tuple + Lower and upper intensity limits. + """ + imin, imax = dtype_range[image.dtype.type] + if clip_negative: + imin = 0 + return imin, imax + + +def _dtype_itemsize(itemsize, *dtypes): + """Return first of `dtypes` with itemsize greater than `itemsize` + + Parameters + ---------- + itemsize: int + The data type object element size. + + Other Parameters + ---------------- + *dtypes: + Any Object accepted by `np.dtype` to be converted to a data + type object + + Returns + ------- + dtype: data type object + First of `dtypes` with itemsize greater than `itemsize`. + + """ + return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize) + + +def _dtype_bits(kind, bits, itemsize=1): + """Return dtype of `kind` that can store a `bits` wide unsigned int + + Parameters: + kind: str + Data type kind. + bits: int + Desired number of bits. + itemsize: int + The data type object element size. + + Returns + ------- + dtype: data type object + Data type of `kind` that can store a `bits` wide unsigned int + + """ + + s = next( + i + for i in (itemsize,) + (2, 4, 8) + if bits < (i * 8) or (bits == (i * 8) and kind == 'u') + ) + + return np.dtype(kind + str(s)) + + +def _scale(a, n, m, copy=True): + """Scale an array of unsigned/positive integers from `n` to `m` bits. + + Numbers can be represented exactly only if `m` is a multiple of `n`. + + Parameters + ---------- + a : ndarray + Input image array. + n : int + Number of bits currently used to encode the values in `a`. + m : int + Desired number of bits to encode the values in `out`. + copy : bool, optional + If True, allocates and returns new array. Otherwise, modifies + `a` in place. + + Returns + ------- + out : array + Output image array. Has the same kind as `a`. 
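+
+    Examples
+    --------
+    For example, widening ``uint8`` data to 16 bits multiplies by 257, so
+    that 255 maps exactly to 65535:
+
+    >>> import numpy as np
+    >>> _scale(np.array([0, 255], dtype=np.uint8), 8, 16)
+    array([    0, 65535], dtype=uint16)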
+ """ + kind = a.dtype.kind + if n > m and a.max() < 2**m: + mnew = int(np.ceil(m / 2) * 2) + if mnew > m: + dtype = f'int{mnew}' + else: + dtype = f'uint{mnew}' + n = int(np.ceil(n / 2) * 2) + warn( + f'Downcasting {a.dtype} to {dtype} without scaling because max ' + f'value {a.max()} fits in {dtype}', + stacklevel=3, + ) + return a.astype(_dtype_bits(kind, m)) + elif n == m: + return a.copy() if copy else a + elif n > m: + # downscale with precision loss + if copy: + b = np.empty(a.shape, _dtype_bits(kind, m)) + np.floor_divide(a, 2 ** (n - m), out=b, dtype=a.dtype, casting='unsafe') + return b + else: + a //= 2 ** (n - m) + return a + elif m % n == 0: + # exact upscale to a multiple of `n` bits + if copy: + b = np.empty(a.shape, _dtype_bits(kind, m)) + np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype) + return b + else: + a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False) + a *= (2**m - 1) // (2**n - 1) + return a + else: + # upscale to a multiple of `n` bits, + # then downscale with precision loss + o = (m // n + 1) * n + if copy: + b = np.empty(a.shape, _dtype_bits(kind, o)) + np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype) + b //= 2 ** (o - m) + return b + else: + a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False) + a *= (2**o - 1) // (2**n - 1) + a //= 2 ** (o - m) + return a + + +def _convert(image, dtype, force_copy=False, uniform=False): + """ + Convert an image to the requested data-type. + + Warnings are issued in case of precision loss, or when negative values + are clipped during conversion to unsigned integer types (sign loss). + + Floating point values are expected to be normalized and will be clipped + to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or + signed integers respectively. + + Numbers are not shifted to the negative side when converting from + unsigned to signed integer types. Negative values will be clipped when + converting to unsigned integers. + + Parameters + ---------- + image : ndarray + Input image. + dtype : dtype + Target data-type. + force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. + uniform : bool, optional + Uniformly quantize the floating point range to the integer range. + By default (uniform=False) floating point values are scaled and + rounded to the nearest integers, which minimizes back and forth + conversion errors. + + .. versionchanged:: 0.15 + ``_convert`` no longer warns about possible precision or sign + information loss. See discussions on these warnings at: + https://github.com/scikit-image/scikit-image/issues/2602 + https://github.com/scikit-image/scikit-image/issues/543#issuecomment-208202228 + https://github.com/scikit-image/scikit-image/pull/3575 + + References + ---------- + .. [1] DirectX data conversion rules. + https://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx + .. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25", + pp 7-8. Khronos Group, 2010. + .. [3] Proper treatment of pixels as integers. A.W. Paeth. + In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990. + .. [4] Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels", + pp 47-57. Morgan Kaufmann, 1998. 
+ + """ + image = np.asarray(image) + dtypeobj_in = image.dtype + if dtype is np.floating: + dtypeobj_out = np.dtype('float64') + else: + dtypeobj_out = np.dtype(dtype) + dtype_in = dtypeobj_in.type + dtype_out = dtypeobj_out.type + kind_in = dtypeobj_in.kind + kind_out = dtypeobj_out.kind + itemsize_in = dtypeobj_in.itemsize + itemsize_out = dtypeobj_out.itemsize + + # Below, we do an `issubdtype` check. Its purpose is to find out + # whether we can get away without doing any image conversion. This happens + # when: + # + # - the output and input dtypes are the same or + # - when the output is specified as a type, and the input dtype + # is a subclass of that type (e.g. `np.floating` will allow + # `float32` and `float64` arrays through) + + if np.issubdtype(dtype_in, dtype): + if force_copy: + image = image.copy() + return image + + if not (dtype_in in _supported_types and dtype_out in _supported_types): + raise ValueError(f'Cannot convert from {dtypeobj_in} to ' f'{dtypeobj_out}.') + + if kind_in in 'ui': + imin_in = np.iinfo(dtype_in).min + imax_in = np.iinfo(dtype_in).max + if kind_out in 'ui': + imin_out = np.iinfo(dtype_out).min + imax_out = np.iinfo(dtype_out).max + + # any -> binary + if kind_out == 'b': + return image > dtype_in(dtype_range[dtype_in][1] / 2) + + # binary -> any + if kind_in == 'b': + result = image.astype(dtype_out) + if kind_out != 'f': + result *= dtype_out(dtype_range[dtype_out][1]) + return result + + # float -> any + if kind_in == 'f': + if kind_out == 'f': + # float -> float + return image.astype(dtype_out) + + if np.min(image) < -1.0 or np.max(image) > 1.0: + raise ValueError("Images of type float must be between -1 and 1.") + # floating point -> integer + # use float type that can represent output integer type + computation_type = _dtype_itemsize( + itemsize_out, dtype_in, np.float32, np.float64 + ) + + if not uniform: + if kind_out == 'u': + image_out = np.multiply(image, imax_out, dtype=computation_type) + else: + image_out = np.multiply( + image, (imax_out - imin_out) / 2, dtype=computation_type + ) + image_out -= 1.0 / 2.0 + np.rint(image_out, out=image_out) + np.clip(image_out, imin_out, imax_out, out=image_out) + elif kind_out == 'u': + image_out = np.multiply(image, imax_out + 1, dtype=computation_type) + np.clip(image_out, 0, imax_out, out=image_out) + else: + image_out = np.multiply( + image, (imax_out - imin_out + 1.0) / 2.0, dtype=computation_type + ) + np.floor(image_out, out=image_out) + np.clip(image_out, imin_out, imax_out, out=image_out) + return image_out.astype(dtype_out) + + # signed/unsigned int -> float + if kind_out == 'f': + # use float type that can exactly represent input integers + computation_type = _dtype_itemsize( + itemsize_in, dtype_out, np.float32, np.float64 + ) + + if kind_in == 'u': + # using np.divide or np.multiply doesn't copy the data + # until the computation time + image = np.multiply(image, 1.0 / imax_in, dtype=computation_type) + # DirectX uses this conversion also for signed ints + # if imin_in: + # np.maximum(image, -1.0, out=image) + elif kind_in == 'i': + # From DirectX conversions: + # The most negative value maps to -1.0f + # Every other value is converted to a float (call it c) + # and then result = c * (1.0f / (2⁽ⁿ⁻¹⁾-1)). 
+ + image = np.multiply(image, 1.0 / imax_in, dtype=computation_type) + np.maximum(image, -1.0, out=image) + + else: + image = np.add(image, 0.5, dtype=computation_type) + image *= 2 / (imax_in - imin_in) + + return np.asarray(image, dtype_out) + + # unsigned int -> signed/unsigned int + if kind_in == 'u': + if kind_out == 'i': + # unsigned int -> signed int + image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1) + return image.view(dtype_out) + else: + # unsigned int -> unsigned int + return _scale(image, 8 * itemsize_in, 8 * itemsize_out) + + # signed int -> unsigned int + if kind_out == 'u': + image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out) + result = np.empty(image.shape, dtype_out) + np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe') + return result + + # signed int -> signed int + if itemsize_in > itemsize_out: + return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1) + + image = image.astype(_dtype_bits('i', itemsize_out * 8)) + image -= imin_in + image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False) + image += imin_out + return image.astype(dtype_out) + + +def convert(image, dtype, force_copy=False, uniform=False): + warn( + "The use of this function is discouraged as its behavior may change " + "dramatically in scikit-image 1.0. This function will be removed " + "in scikit-image 1.0.", + FutureWarning, + stacklevel=2, + ) + return _convert(image=image, dtype=dtype, force_copy=force_copy, uniform=uniform) + + +if _convert.__doc__ is not None: + convert.__doc__ = ( + _convert.__doc__ + + """ + + Warns + ----- + FutureWarning: + .. versionadded:: 0.17 + + The use of this function is discouraged as its behavior may change + dramatically in scikit-image 1.0. This function will be removed + in scikit-image 1.0. + """ + ) + + +def img_as_float32(image, force_copy=False): + """Convert an image to single-precision (32-bit) floating point format. + + Parameters + ---------- + image : ndarray + Input image. + force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. + + Returns + ------- + out : ndarray of float32 + Output image. + + Notes + ----- + The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when + converting from unsigned or signed datatypes, respectively. + If the input image has a float type, intensity values are not modified + and can be outside the ranges [0.0, 1.0] or [-1.0, 1.0]. + + """ + return _convert(image, np.float32, force_copy) + + +def img_as_float64(image, force_copy=False): + """Convert an image to double-precision (64-bit) floating point format. + + Parameters + ---------- + image : ndarray + Input image. + force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. + + Returns + ------- + out : ndarray of float64 + Output image. + + Notes + ----- + The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when + converting from unsigned or signed datatypes, respectively. + If the input image has a float type, intensity values are not modified + and can be outside the ranges [0.0, 1.0] or [-1.0, 1.0]. + + """ + return _convert(image, np.float64, force_copy) + + +def img_as_float(image, force_copy=False): + """Convert an image to floating point format. + + This function is similar to `img_as_float64`, but will not convert + lower-precision floating point arrays to `float64`. + + Parameters + ---------- + image : ndarray + Input image. 
+ force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. + + Returns + ------- + out : ndarray of float + Output image. + + Notes + ----- + The range of a floating point image is [0.0, 1.0] or [-1.0, 1.0] when + converting from unsigned or signed datatypes, respectively. + If the input image has a float type, intensity values are not modified + and can be outside the ranges [0.0, 1.0] or [-1.0, 1.0]. + + """ + return _convert(image, np.floating, force_copy) + + +def img_as_uint(image, force_copy=False): + """Convert an image to 16-bit unsigned integer format. + + Parameters + ---------- + image : ndarray + Input image. + force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. + + Returns + ------- + out : ndarray of uint16 + Output image. + + Notes + ----- + Negative input values will be clipped. + Positive values are scaled between 0 and 65535. + + """ + return _convert(image, np.uint16, force_copy) + + +def img_as_int(image, force_copy=False): + """Convert an image to 16-bit signed integer format. + + Parameters + ---------- + image : ndarray + Input image. + force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. + + Returns + ------- + out : ndarray of int16 + Output image. + + Notes + ----- + The values are scaled between -32768 and 32767. + If the input data-type is positive-only (e.g., uint8), then + the output image will still only have positive values. + + """ + return _convert(image, np.int16, force_copy) + + +def img_as_ubyte(image, force_copy=False): + """Convert an image to 8-bit unsigned integer format. + + Parameters + ---------- + image : ndarray + Input image. + force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. + + Returns + ------- + out : ndarray of ubyte (uint8) + Output image. + + Notes + ----- + Negative input values will be clipped. + Positive values are scaled between 0 and 255. + + """ + return _convert(image, np.uint8, force_copy) + + +def img_as_bool(image, force_copy=False): + """Convert an image to boolean format. + + Parameters + ---------- + image : ndarray + Input image. + force_copy : bool, optional + Force a copy of the data, irrespective of its current dtype. + + Returns + ------- + out : ndarray of bool (`bool_`) + Output image. + + Notes + ----- + The upper half of the input dtype's positive range is True, and the lower + half is False. All negative values (if present) are False. 
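+
+    Examples
+    --------
+    For example, ``uint8`` values become True strictly above 127:
+
+    >>> import numpy as np
+    >>> img_as_bool(np.array([0, 127, 128, 255], dtype=np.uint8))
+    array([False, False,  True,  True])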
+ + """ + return _convert(image, bool, force_copy) diff --git a/bioxelnodes/utils.py b/bioxelnodes/utils.py index 360d75d..c8928ef 100644 --- a/bioxelnodes/utils.py +++ b/bioxelnodes/utils.py @@ -1,5 +1,8 @@ import bpy import mathutils +from pathlib import Path +import pyopenvdb as vdb +from uuid import uuid4 def get_type(cls): @@ -89,9 +92,34 @@ def hide_in_ray(obj): obj.visible_shadow = False +def save_vdb(grids, context): + preferences = context.preferences.addons[__package__].preferences + cache_dir = Path(preferences.cache_dir, 'VDBs') + cache_dir.mkdir(parents=True, exist_ok=True) + + vdb_path = Path(cache_dir, f"{uuid4()}.vdb") + print(f"Storing the VDB file ({str(vdb_path)})...") + vdb.write(str(vdb_path), grids=grids) + + return vdb_path + + +def get_container_from_selection(): + containers = [] + for obj in bpy.context.selected_objects: + if get_container(obj): + containers.append(obj) + + return containers + + def get_container(current_obj): if current_obj: - return current_obj if current_obj.get('bioxel_container') else None + if current_obj.get('bioxel_container'): + return current_obj + elif current_obj.get('bioxel_layer'): + parent = current_obj.parent + return parent if parent.get('bioxel_container') else None return None diff --git a/extension/__init__.py b/extension/__init__.py index 9d122ed..986cf2c 100644 --- a/extension/__init__.py +++ b/extension/__init__.py @@ -11,7 +11,7 @@ def register(): auto_load.register() menus.add() bpy.types.Scene.bioxel_layer_dir = bpy.props.StringProperty( - name="Bioxel Layers Directory", + name="Layer Directory", subtype='DIR_PATH', default="//" ) diff --git a/extension/blender_manifest.toml b/extension/blender_manifest.toml index da2d453..83b19e6 100644 --- a/extension/blender_manifest.toml +++ b/extension/blender_manifest.toml @@ -3,54 +3,67 @@ schema_version = "1.0.0" # Example of manifest file for a Blender extension # Change the values according to your extension id = "bioxelnodes" -version = "0.2.2" +version = "0.2.3" name = "Bioxel Nodes" tagline = "For scientific volumetric data visualization in Blender" maintainer = "Ma Nan " # Supported types: "add-on", "theme" type = "add-on" -# Optional: add-ons can list which resources they will require: -# * "files" (for access of any filesystem operations) -# * "network" (for internet access) -# * "clipboard" (to read and/or write the system clipboard) -# * "camera" (to capture photos and videos) -# * "microphone" (to capture audio) -permissions = ["files"] - # Optional link to documentation, support, source files, etc website = "https://omoolab.github.io/BioxelNodes/latest" # Optional list defined by Blender and server, see: -# https://docs.blender.org/manual/en/dev/extensions/tags.html -tags = ["Geometry Nodes", "Render", "Material"] +# https://docs.blender.org/manual/en/dev/advanced/extensions/tags.html +tags = ["Geometry Nodes", "Render"] blender_version_min = "4.2.0" # Optional: maximum supported Blender version # blender_version_max = "5.1.0" # License conforming to https://spdx.org/licenses/ (use "SPDX: prefix) -# https://docs.blender.org/manual/en/dev/extensions/licenses.html -license = ["SPDX:MIT"] +# https://docs.blender.org/manual/en/dev/advanced/extensions/licenses.html +license = [ + "SPDX:MIT", +] # Optional: required by some licenses. -copyright = ["2024 OmooLab"] +copyright = [ + "2024 OmooLab" +] # Optional list of supported platforms. If omitted, the extension will be available in all operating systems. 
-platforms = ["windows-amd64", "macos-x86_64"] -# Other supported platforms: "windows-arm64", "macos-x86_64" +platforms = ["windows-x64"] +# Other supported platforms: "windows-arm64", "macos-x64" -# TODO: externalpackage to wheels # Optional: bundle 3rd party Python modules. -# https://docs.blender.org/manual/en/dev/extensions/python_wheels.html +# https://docs.blender.org/manual/en/dev/advanced/extensions/python_wheels.html wheels = [ - "./wheels/SimpleITK-2.3.1-cp311-cp311-win_amd64.whl", - "./wheels/SimpleITK-2.3.1-cp311-cp311-macosx_11_0_arm64.whl", + "./wheels/SimpleITK-2.3.1-cp311-cp311-win_amd64.whl" ] -# Optional: build setting. -# https://docs.blender.org/manual/en/dev/extensions/command_line_arguments.html#command-line-args-extension-build + +## Optional: add-ons can list which resources they will require: +## * files (for access of any filesystem operations) +## * network (for internet access) +## * clipboard (to read and/or write the system clipboard) +## * camera (to capture photos and videos) +## * microphone (to capture audio) +## +## If using network, remember to also check `bpy.app.online_access` +## https://docs.blender.org/manual/en/dev/advanced/extensions/addons.html#internet-access +## +## For each permission it is important to also specify the reason why it is required. +## Keep this a single short sentence without a period (.) at the end. +## For longer explanations use the documentation or detail page. +# +[permissions] +files = "Import/export volume data from/to disk" + +# Optional: build settings. +# https://docs.blender.org/manual/en/dev/advanced/extensions/command_line_arguments.html#command-line-args-extension-build # [build] # paths_exclude_pattern = [ -# "/.git/" -# "__pycache__/" -# ] +# "__pycache__/", +# "/.git/", +# "/*.zip", +# ] \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 658fa81..bb06e23 100644 --- a/poetry.lock +++ b/poetry.lock @@ -539,6 +539,38 @@ files = [ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] +[[package]] +name = "imageio" +version = "2.34.2" +description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "imageio-2.34.2-py3-none-any.whl", hash = "sha256:a0bb27ec9d5bab36a9f4835e51b21d2cb099e1f78451441f94687ff3404b79f8"}, + {file = "imageio-2.34.2.tar.gz", hash = "sha256:5c0c0ee8faa018a1c42f649b90395dd4d3bb6187c09053a0cd6f1fdd51bbff5e"}, +] + +[package.dependencies] +numpy = "*" +pillow = ">=8.3.2" + +[package.extras] +all-plugins = ["astropy", "av", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] +all-plugins-pypy = ["av", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] +build = ["wheel"] +dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] +docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] +ffmpeg = ["imageio-ffmpeg", "psutil"] +fits = ["astropy"] +full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "itk", "numpydoc", "pillow-heif", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "sphinx (<6)", "tifffile", "wheel"] +gdal = ["gdal"] +itk = ["itk"] +linting = ["black", "flake8"] +pillow-heif = ["pillow-heif"] +pyav = ["av"] +test = ["fsspec[github]", "pytest", "pytest-cov"] +tifffile = ["tifffile"] + [[package]] name = "importlib-metadata" version = "7.1.0" @@ -733,6 +765,25 @@ traitlets = ">=5.3" docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"] +[[package]] +name = "lazy-loader" +version = "0.4" +description = "Makes it easy to load subpackages and functions on demand." +optional = false +python-versions = ">=3.7" +files = [ + {file = "lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc"}, + {file = "lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1"}, +] + +[package.dependencies] +packaging = "*" + +[package.extras] +dev = ["changelist (==0.5)"] +lint = ["pre-commit (==3.7.0)"] +test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] + [[package]] name = "markdown" version = "3.6" @@ -1035,6 +1086,24 @@ files = [ {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, ] +[[package]] +name = "networkx" +version = "3.3" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.10" +files = [ + {file = "networkx-3.3-py3-none-any.whl", hash = "sha256:28575580c6ebdaf4505b22c6256a2b9de86b316dc63ba9e93abde3d78dfdbcf2"}, + {file = "networkx-3.3.tar.gz", hash = "sha256:0c127d8b2f4865f59ae9cb8aafcd60b5c70f3241ebd66f7defad7c4ab90126c9"}, +] + +[package.extras] +default = ["matplotlib (>=3.6)", "numpy (>=1.23)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.5)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["myst-nb (>=1.0)", "numpydoc (>=1.7)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=2.0)", "pygraphviz (>=1.12)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + [[package]] name = "numpy" version = "1.26.4" @@ -1141,6 +1210,92 @@ files = [ [package.dependencies] ptyprocess = ">=0.5" +[[package]] +name = "pillow" +version = "10.3.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, + {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, + {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, + {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, + {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, + {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, + {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, + {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, + {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, + {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, + {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, + {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, + {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, + {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, + {file = 
"pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, + {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, + {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, + {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, + {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, + {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, + {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, + {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, + {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, + {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, + {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, + {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = 
"sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, + {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, + {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, + {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, + {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, + {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, + {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, + {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, + {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", 
"pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + [[package]] name = "platformdirs" version = "4.2.1" @@ -1680,6 +1835,96 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "scikit-image" +version = "0.24.0" +description = "Image processing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scikit_image-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb3bc0264b6ab30b43c4179ee6156bc18b4861e78bb329dd8d16537b7bbf827a"}, + {file = "scikit_image-0.24.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9c7a52e20cdd760738da38564ba1fed7942b623c0317489af1a598a8dedf088b"}, + {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93f46e6ce42e5409f4d09ce1b0c7f80dd7e4373bcec635b6348b63e3c886eac8"}, + {file = "scikit_image-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39ee0af13435c57351a3397eb379e72164ff85161923eec0c38849fecf1b4764"}, + {file = "scikit_image-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:7ac7913b028b8aa780ffae85922894a69e33d1c0bf270ea1774f382fe8bf95e7"}, + {file = "scikit_image-0.24.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:272909e02a59cea3ed4aa03739bb88df2625daa809f633f40b5053cf09241831"}, + {file = "scikit_image-0.24.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:190ebde80b4470fe8838764b9b15f232a964f1a20391663e31008d76f0c696f7"}, + {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59c98cc695005faf2b79904e4663796c977af22586ddf1b12d6af2fa22842dc2"}, + {file = "scikit_image-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa27b3a0dbad807b966b8db2d78da734cb812ca4787f7fbb143764800ce2fa9c"}, + {file = "scikit_image-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:dacf591ac0c272a111181afad4b788a27fe70d213cfddd631d151cbc34f8ca2c"}, + {file = "scikit_image-0.24.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6fccceb54c9574590abcddc8caf6cefa57c13b5b8b4260ab3ff88ad8f3c252b3"}, + {file = "scikit_image-0.24.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ccc01e4760d655aab7601c1ba7aa4ddd8b46f494ac46ec9c268df6f33ccddf4c"}, + {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18836a18d3a7b6aca5376a2d805f0045826bc6c9fc85331659c33b4813e0b563"}, + {file = "scikit_image-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8579bda9c3f78cb3b3ed8b9425213c53a25fa7e994b7ac01f2440b395babf660"}, + {file = "scikit_image-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:82ab903afa60b2da1da2e6f0c8c65e7c8868c60a869464c41971da929b3e82bc"}, + {file = "scikit_image-0.24.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef04360eda372ee5cd60aebe9be91258639c86ae2ea24093fb9182118008d009"}, + {file = "scikit_image-0.24.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e9aadb442360a7e76f0c5c9d105f79a83d6df0e01e431bd1d5757e2c5871a1f3"}, + {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e37de6f4c1abcf794e13c258dc9b7d385d5be868441de11c180363824192ff7"}, + {file = "scikit_image-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4688c18bd7ec33c08d7bf0fd19549be246d90d5f2c1d795a89986629af0a1e83"}, + {file = "scikit_image-0.24.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:56dab751d20b25d5d3985e95c9b4e975f55573554bd76b0aedf5875217c93e69"}, + {file = "scikit_image-0.24.0.tar.gz", hash = "sha256:5d16efe95da8edbeb363e0c4157b99becbd650a60b77f6e3af5768b66cf007ab"}, +] + +[package.dependencies] +imageio = ">=2.33" +lazy-loader = ">=0.4" +networkx = ">=2.8" +numpy = ">=1.23" +packaging = ">=21" +pillow = ">=9.1" +scipy = ">=1.9" +tifffile = ">=2022.8.12" + +[package.extras] +build = ["Cython (>=3.0.4)", "build", "meson-python (>=0.15)", "ninja", "numpy (>=2.0.0rc1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.8)", "wheel"] +data = ["pooch (>=1.6.0)"] +developer = ["ipython", "pre-commit", "tomli"] +docs = ["PyWavelets (>=1.1.1)", "dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.6)", "myst-parser", "numpydoc (>=1.7)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.15.2)", "pytest-doctestplus", "pytest-runner", "scikit-learn (>=1.1)", "seaborn (>=0.11)", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-gallery (>=0.14)", "sphinx_design (>=0.5)", "tifffile (>=2022.8.12)"] +optional = ["PyWavelets (>=1.1.1)", "SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.6)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=1.1)"] +test = ["asv", "numpydoc (>=1.7)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-doctestplus", "pytest-faulthandler", "pytest-localserver"] + +[[package]] +name = "scipy" +version = "1.14.0" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.10" +files = [ + {file = "scipy-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7e911933d54ead4d557c02402710c2396529540b81dd554fc1ba270eb7308484"}, + {file = "scipy-1.14.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:687af0a35462402dd851726295c1a5ae5f987bd6e9026f52e9505994e2f84ef6"}, + {file = "scipy-1.14.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:07e179dc0205a50721022344fb85074f772eadbda1e1b3eecdc483f8033709b7"}, + {file = "scipy-1.14.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:6a9c9a9b226d9a21e0a208bdb024c3982932e43811b62d202aaf1bb59af264b1"}, + {file = "scipy-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:076c27284c768b84a45dcf2e914d4000aac537da74236a0d45d82c6fa4b7b3c0"}, + {file = "scipy-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42470ea0195336df319741e230626b6225a740fd9dce9642ca13e98f667047c0"}, + {file = "scipy-1.14.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:176c6f0d0470a32f1b2efaf40c3d37a24876cebf447498a4cefb947a79c21e9d"}, + {file = "scipy-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:ad36af9626d27a4326c8e884917b7ec321d8a1841cd6dacc67d2a9e90c2f0359"}, + {file = "scipy-1.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6d056a8709ccda6cf36cdd2eac597d13bc03dba38360f418560a93050c76a16e"}, + {file = "scipy-1.14.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:f0a50da861a7ec4573b7c716b2ebdcdf142b66b756a0d392c236ae568b3a93fb"}, + {file = "scipy-1.14.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:94c164a9e2498e68308e6e148646e486d979f7fcdb8b4cf34b5441894bdb9caf"}, + {file = "scipy-1.14.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:a7d46c3e0aea5c064e734c3eac5cf9eb1f8c4ceee756262f2c7327c4c2691c86"}, + {file = "scipy-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9eee2989868e274aae26125345584254d97c56194c072ed96cb433f32f692ed8"}, + {file = "scipy-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3154691b9f7ed73778d746da2df67a19d046a6c8087c8b385bc4cdb2cfca74"}, + {file = "scipy-1.14.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c40003d880f39c11c1edbae8144e3813904b10514cd3d3d00c277ae996488cdb"}, + {file = "scipy-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:5b083c8940028bb7e0b4172acafda6df762da1927b9091f9611b0bcd8676f2bc"}, + {file = "scipy-1.14.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bff2438ea1330e06e53c424893ec0072640dac00f29c6a43a575cbae4c99b2b9"}, + {file = "scipy-1.14.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:bbc0471b5f22c11c389075d091d3885693fd3f5e9a54ce051b46308bc787e5d4"}, + {file = "scipy-1.14.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:64b2ff514a98cf2bb734a9f90d32dc89dc6ad4a4a36a312cd0d6327170339eb0"}, + {file = "scipy-1.14.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:7d3da42fbbbb860211a811782504f38ae7aaec9de8764a9bef6b262de7a2b50f"}, + {file = "scipy-1.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d91db2c41dd6c20646af280355d41dfa1ec7eead235642178bd57635a3f82209"}, + {file = "scipy-1.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a01cc03bcdc777c9da3cfdcc74b5a75caffb48a6c39c8450a9a05f82c4250a14"}, + {file = "scipy-1.14.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:65df4da3c12a2bb9ad52b86b4dcf46813e869afb006e58be0f516bc370165159"}, + {file = "scipy-1.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:4c4161597c75043f7154238ef419c29a64ac4a7c889d588ea77690ac4d0d9b20"}, + {file = "scipy-1.14.0.tar.gz", hash = "sha256:b5923f48cb840380f9854339176ef21763118a7300a88203ccd0bdd26e58527b"}, +] + +[package.dependencies] +numpy = ">=1.23.5,<2.3" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "simpleitk" version = "2.3.1" @@ -1744,6 +1989,23 @@ pure-eval = "*" [package.extras] tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] +[[package]] +name = "tifffile" +version = "2024.6.18" +description = "Read and write TIFF files" +optional = false +python-versions = ">=3.9" +files = [ + {file = "tifffile-2024.6.18-py3-none-any.whl", hash = "sha256:67299c0445fc47463bbc71f3cb4676da2ab0242b0c6c6542a0680801b4b97d8a"}, + {file = "tifffile-2024.6.18.tar.gz", hash = "sha256:57e0d2a034bcb6287ea3155d8716508dfac86443a257f6502b57ee7f8a33b3b6"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"] + [[package]] name = "tomli" version = "2.0.1" @@ -1963,4 +2225,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = "~=3.10.0" -content-hash = "cd962cdf6abefe2eda6b33aa1ccaab485de7c30aaf4f8d9515c49f45a8dcaefb" +content-hash = 
"e9980867106af8e9e7524ff319d3c0785906b359022595732260fd4b3ca135ca" diff --git a/pyproject.toml b/pyproject.toml index 232a7d3..d32d96e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "bioxelnodes" -version = "0.2.2" +version = "0.2.3" description = "" authors = ["Ma Nan "] license = "MIT" @@ -10,6 +10,7 @@ readme = "README.md" python = "~=3.10.0" bpy = "~=4.0" simpleitk = "^2.3.1" +scikit-image = "^0.24.0" [tool.poetry.group.dev.dependencies] ipykernel = "^6.25.2"