From 854af4c3bcfdeae5997b9b112eb1d03c790fe7bb Mon Sep 17 00:00:00 2001
From: Ma Nan
Date: Mon, 22 Jul 2024 08:51:12 +0800
Subject: [PATCH] feat: support more formats

feat: support .mrc .map
feat: support .ome.tif
feat: improve user experience for importing
feat: better contrast algorithm
---
 .github/workflows/upload-assets.yml    |   10 +-
 bioxelnodes/__init__.py                |    4 +-
 .../assets/Nodes/BioxelNodes_4.1.blend |    4 +-
 bioxelnodes/exceptions.py              |    4 +
 bioxelnodes/io.py                      | 1322 +++++++++--------
 bioxelnodes/menus.py                   |   10 +-
 bioxelnodes/operators.py               |    6 +-
 bioxelnodes/parse.py                   |  369 +++++
 bioxelnodes/requirements.txt           |    4 +-
 bioxelnodes/utils.py                   |   19 +-
 extension/__init__.py                  |    2 +
 extension/blender_manifest.toml        |   10 +-
 mkdocs.yml                             |    2 +-
 poetry.lock                            |  243 ++-
 pyproject.toml                         |    4 +-
 15 files changed, 1358 insertions(+), 655 deletions(-)
 create mode 100644 bioxelnodes/exceptions.py
 create mode 100644 bioxelnodes/parse.py

diff --git a/.github/workflows/upload-assets.yml b/.github/workflows/upload-assets.yml
index fa506cb..97884f6 100644
--- a/.github/workflows/upload-assets.yml
+++ b/.github/workflows/upload-assets.yml
@@ -88,7 +88,15 @@ jobs:
           python-version: 3.11
       - name: Zip Extension
         run: |
-          pip download SimpleITK==2.3.1 --dest bioxelnodes/wheels --only-binary=:all: --python-version=3.11 --platform=win_amd64
+          pip download SimpleITK==2.3.1 --dest wheels --only-binary=:all: --python-version=3.11 --platform=win_amd64
+          pip download pyometiff==1.0.0 --dest wheels --only-binary=:all: --python-version=3.11 --platform=win_amd64
+          pip download mrcfile==1.5.1 --dest wheels --only-binary=:all: --python-version=3.11 --platform=win_amd64
+          mkdir bioxelnodes/wheels
+          cp wheels/SimpleITK-2.3.1-cp311-cp311-win_amd64.whl bioxelnodes/wheels/SimpleITK-2.3.1-cp311-cp311-win_amd64.whl
+          cp wheels/lxml-5.2.2-cp311-cp311-win_amd64.whl bioxelnodes/wheels/lxml-5.2.2-cp311-cp311-win_amd64.whl
+          cp wheels/tifffile-2024.7.21-py3-none-any.whl bioxelnodes/wheels/tifffile-2024.7.21-py3-none-any.whl
+          cp wheels/pyometiff-1.0.0-py3-none-any.whl bioxelnodes/wheels/pyometiff-1.0.0-py3-none-any.whl
+          cp wheels/mrcfile-1.5.1-py2.py3-none-any.whl bioxelnodes/wheels/mrcfile-1.5.1-py2.py3-none-any.whl
           rm -r bioxelnodes/externalpackage
           cp extension/__init__.py bioxelnodes/__init__.py
           cp extension/preferences.py bioxelnodes/preferences.py
diff --git a/bioxelnodes/__init__.py b/bioxelnodes/__init__.py
index 754b6f0..457bb17 100644
--- a/bioxelnodes/__init__.py
+++ b/bioxelnodes/__init__.py
@@ -9,7 +9,7 @@
     "author": "Ma Nan",
     "description": "",
     "blender": (4, 1, 0),
-    "version": (0, 2, 7),
+    "version": (0, 2, 8),
     "location": "File -> Import",
     "warning": "",
     "category": "Node"
@@ -20,6 +20,8 @@
 def register():
     auto_load.register()
+    bpy.types.WindowManager.bioxelnodes_progress_factor = bpy.props.FloatProperty()
+    bpy.types.WindowManager.bioxelnodes_progress_text = bpy.props.StringProperty()
     menus.add()
diff --git a/bioxelnodes/assets/Nodes/BioxelNodes_4.1.blend b/bioxelnodes/assets/Nodes/BioxelNodes_4.1.blend
index df5fb34..016c4cf 100644
--- a/bioxelnodes/assets/Nodes/BioxelNodes_4.1.blend
+++ b/bioxelnodes/assets/Nodes/BioxelNodes_4.1.blend
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:40dc55e26be78cb4b175df616bbba5d088c32ae027720fd2bb3f5f17dbdfef82
-size 6624975
+oid sha256:b219a6f005718d8223766215a598ebde9b646c3960fb298d72ffc864791f3c3a
+size 6691383
diff --git a/bioxelnodes/exceptions.py b/bioxelnodes/exceptions.py
new file mode 100644
index 0000000..f1846c0
--- /dev/null
+++ b/bioxelnodes/exceptions.py
@@ -0,0 +1,4
@@ +class CancelledByUser(Exception): + def __init__(self): + message = 'Cancelled by user' + super().__init__(message) diff --git a/bioxelnodes/io.py b/bioxelnodes/io.py index f1de20d..32451d2 100644 --- a/bioxelnodes/io.py +++ b/bioxelnodes/io.py @@ -1,60 +1,30 @@ import math import bpy import shutil +import threading + + from bpy_extras.io_utils import axis_conversion import pyopenvdb as vdb import numpy as np from pathlib import Path -from uuid import uuid4 import mathutils import random from . import skimage as ski from .nodes import custom_nodes +from .exceptions import CancelledByUser from .props import BIOXELNODES_Series -from .utils import (calc_bbox_verts, get_all_layers, get_container_from_selection, get_layer, get_text_index_str, - get_nodes_by_type, hide_in_ray, lock_transform, move_node_between_nodes, move_node_to_node, save_vdb, save_vdbs, select_object, show_message) +from .parse import DICOM_EXTS, FH_EXTS, SUPPORT_EXTS, get_ext, parse_volumetric_data +from .utils import (calc_bbox_verts, get_all_layers, get_container_from_selection, get_layer, + get_nodes_by_type, hide_in_ray, lock_transform, move_node_between_nodes, move_node_to_node, progress_update, progress_bar, save_vdb, save_vdbs, select_object) + try: import SimpleITK as sitk except: ... -SUPPORT_EXTS = ['', '.dcm', '.DCM', '.DICOM', - '.bmp', '.BMP', - '.PIC', '.pic', - '.gipl', '.gipl.gz', - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.lsm', '.LSM', - '.tif', '.TIF', '.tiff', '.TIFF', - '.mnc', '.MNC', - '.mrc', '.rec', - '.mha', '.mhd', - '.hdf', '.h4', '.hdf4', '.he2', '.h5', '.hdf5', '.he5', - '.nia', '.nii', '.nii.gz', '.hdr', '.img', '.img.gz', - '.nrrd', '.nhdr', - '.png', '.PNG', - '.vtk'] - -SEQUENCE_EXTS = ['.dcm', '.DCM', '.DICOM', - '.bmp', '.BMP', - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.tif', '.TIF', '.tiff', '.TIFF', - '.png', '.PNG'] - -DICOM_EXTS = ['', '.dcm', '.DCM', '.DICOM'] - -FH_EXTS = ['', '.dcm', '.DCM', '.DICOM', - '.gipl', '.gipl.gz', - '.mnc', '.MNC', - '.mrc', '.rec', - '.mha', '.mhd', - '.nia', '.nii', '.nii.gz', '.hdr', '.img', '.img.gz', - '.hdf', '.h4', '.hdf4', '.he2', '.h5', '.hdf5', '.he5', - '.nrrd', '.nhdr', - '.vtk', - '.gz'] - def get_layer_shape(bioxel_size: float, orig_shape: tuple, orig_spacing: tuple): shape = (int(orig_shape[0] / bioxel_size * orig_spacing[0]), @@ -74,339 +44,697 @@ def get_layer_size(shape: tuple, bioxel_size: float, scale: float = 1.0): return size -def get_ext(filepath: str) -> str: - file_path = Path(filepath) - if file_path.name.endswith(".nii.gz"): - return ".nii.gz" - elif file_path.name.endswith(".img.gz"): - return ".img.gz" - elif file_path.name.endswith(".gipl.gz"): - return ".gipl.gz" - else: - return file_path.suffix - - -def get_sequence_name(filepath: str) -> str: - ext = get_ext(filepath) - filename = Path(filepath).name.removesuffix(ext) - index: str = get_text_index_str(filename) - return filename.removesuffix(index) - - -def get_sequence_index(filepath: str) -> int: - ext = get_ext(filepath) - filename = Path(filepath).name.removesuffix(ext) - index: str = get_text_index_str(filename) - return int(index) if index else 0 - - -def collect_sequence(filepath: str): - file_path = Path(filepath).resolve() - - files = list(file_path.parent.iterdir()) - files = [f for f in files if f.is_file() - and get_ext(file_path) == get_ext(f) - and get_sequence_name(file_path) == get_sequence_name(f)] - - files.sort(key=get_sequence_index) - sequence = [str(f) for f in files] - return sequence - - -def parse_volume_data(filepath: str, series_id=""): - ext = 
get_ext(filepath) - - if ext in DICOM_EXTS: - dir_path = Path(filepath).resolve().parent - reader = sitk.ImageSeriesReader() - reader.MetaDataDictionaryArrayUpdateOn() - reader.LoadPrivateTagsOn() - series_files = reader.GetGDCMSeriesFileNames( - str(dir_path), series_id) - reader.SetFileNames(series_files) - - itk_volume = reader.Execute() - # for k in reader.GetMetaDataKeys(0): - # v = reader.GetMetaData(0, k) - # print(f'({k}) = = "{v}"') - name = dir_path.name - - elif ext in SEQUENCE_EXTS: - itk_volume = sitk.ReadImage(filepath) - if itk_volume.GetDimension() == 2: - sequence = collect_sequence(filepath) - itk_volume = sitk.ReadImage(sequence) - name = get_sequence_name(filepath) - else: - itk_volume = sitk.ReadImage(filepath) - name = Path(filepath).name.removesuffix(ext) - else: - itk_volume = sitk.ReadImage(filepath) - name = Path(filepath).name.removesuffix(ext) - - # for key in itk_volume.GetMetaDataKeys(): - # print(f"{key},{itk_volume.GetMetaData(key)}") - - if itk_volume.GetDimension() == 3: - itk_volume = sitk.DICOMOrient(itk_volume, 'RAS') - - meta = { - "name": name, - "shape": tuple(itk_volume.GetSize()), - "spacing": tuple(itk_volume.GetSpacing()), - "origin": tuple(itk_volume.GetOrigin()), - "direction": tuple(itk_volume.GetDirection()), - "is_oriented": True - } - - volume = sitk.GetArrayFromImage(itk_volume) - - # transpose ijk to kji - if volume.ndim == 4: - volume = np.transpose(volume, (2, 1, 0, 3)) +""" +ImportVolumetricData + -> ParseVolumetricData -> ImportVolumetricDataDialog +FH_ImportVolumetricData + + start import parse data execute import +""" + + +class ImportVolumetricData(): + bl_options = {'UNDO'} + + filepath: bpy.props.StringProperty(subtype="FILE_PATH") # type: ignore + directory: bpy.props.StringProperty(subtype='DIR_PATH') # type: ignore + + read_as = "scalar" + + def execute(self, context): + containers = get_container_from_selection() + + if len(containers) > 0: + bpy.ops.bioxelnodes.parse_volumetric_data('INVOKE_DEFAULT', + filepath=self.filepath, + directory=self.directory, + container=containers[0].name, + read_as=self.read_as) else: - volume = np.transpose(volume) - volume = np.expand_dims(volume, axis=-1) - - else: - # FIXME: not sure... 
- print(itk_volume.GetDirection()) - direction = np.array(itk_volume.GetDirection()) - direction = direction.reshape(3, 3) if itk_volume.GetDimension() == 3 \ - else direction.reshape(4, 4) - - direction = direction[1:, 1:] - direction = tuple(direction.flatten()) - - meta = { - "name": name, - "shape": tuple(itk_volume.GetSize()[:3]), - "spacing": tuple(itk_volume.GetSpacing()[:3]), - "origin": tuple(itk_volume.GetOrigin()[:3]), - "direction": direction, - "is_oriented": False - } - - volume = sitk.GetArrayFromImage(itk_volume) - - if volume.ndim == 5: - volume = np.transpose(volume, (0, 3, 2, 1, 4)) + bpy.ops.bioxelnodes.parse_volumetric_data('INVOKE_DEFAULT', + filepath=self.filepath, + directory=self.directory, + read_as=self.read_as) + + return {'FINISHED'} + + def invoke(self, context, event): + context.window_manager.fileselect_add(self) + return {'RUNNING_MODAL'} + + +class ImportAsScalarLayer(bpy.types.Operator, ImportVolumetricData): + bl_idname = "bioxelnodes.import_as_scalar_layer" + bl_label = "Import as Scalar" + bl_description = "Import Volumetric Data to Container as Scalar" + read_as = "scalar" + + +class ImportAsLabelLayer(bpy.types.Operator, ImportVolumetricData): + bl_idname = "bioxelnodes.import_as_label_layer" + bl_label = "Import as Label" + bl_description = "Import Volumetric Data to Container as Label" + read_as = "label" + + +try: + class BIOXELNODES_FH_ImportVolumetricData(bpy.types.FileHandler): + bl_idname = "BIOXELNODES_FH_ImportVolumetricData" + bl_label = "File handler for dicom import" + bl_import_operator = "bioxelnodes.parse_volumetric_data" + bl_file_extensions = ";".join(FH_EXTS) + + @classmethod + def poll_drop(cls, context): + return (context.area and context.area.type == 'VIEW_3D') +except: + ... + + +def get_series_ids(self, context): + items = [] + for index, series_id in enumerate(self.series_ids): + items.append(( + series_id.id, + series_id.label, + "", + index + )) + + return items + + +class ParseVolumetricData(bpy.types.Operator): + bl_idname = "bioxelnodes.parse_volumetric_data" + bl_label = "Import Volumetric Data" + bl_description = "Import Volumetric Data as Layer" + bl_options = {'UNDO'} + + meta = {} + thread = None + _timer = None + + progress: bpy.props.FloatProperty(name="Progress", + options={"SKIP_SAVE"}, + default=1) # type: ignore + + filepath: bpy.props.StringProperty(subtype="FILE_PATH") # type: ignore + directory: bpy.props.StringProperty(subtype='DIR_PATH') # type: ignore + container: bpy.props.StringProperty() # type: ignore + + read_as: bpy.props.EnumProperty(name="Read as", + default="scalar", + items=[("scalar", "Scalar", ""), + ("label", "Labels", "")]) # type: ignore + + series_id: bpy.props.EnumProperty(name="Select Series", + items=get_series_ids) # type: ignore + + series_ids: bpy.props.CollectionProperty( + type=BIOXELNODES_Series) # type: ignore + + def execute(self, context): + ext = get_ext(self.filepath) + if ext not in SUPPORT_EXTS: + self.report({"WARNING"}, "Not supported extension.") + return {'CANCELLED'} + + print("Collecting Meta Data...") + + def parse_volumetric_data_func(self, context, cancel): + + def progress_callback(factor, text): + if cancel(): + return False + progress_update(context, factor, text) + return True + + try: + volume, meta = parse_volumetric_data(filepath=self.filepath, + series_id=self.series_id, + progress_callback=progress_callback) + except CancelledByUser: + return + + if cancel(): + return + + self.meta = meta + + # Init cancel flag + self.is_cancelled = False + # Create the 
thread
+        self.thread = threading.Thread(target=parse_volumetric_data_func,
+                                       args=(self, context, lambda: self.is_cancelled))
+
+        # Start the thread
+        self.thread.start()
+        # Add a timer for modal
+        self._timer = context.window_manager.event_timer_add(time_step=0.1,
+                                                             window=context.window)
+        # Append progress bar to status bar
+        bpy.types.STATUSBAR_HT_header.append(progress_bar)
+
+        # Start modal handler
+        context.window_manager.modal_handler_add(self)
+        return {'RUNNING_MODAL'}
+
+    def modal(self, context, event):
+        # Check if user pressed 'ESC'
+        if event.type == 'ESC':
+            self.is_cancelled = True
+            progress_update(context, 0, "Canceling...")
+            return {'PASS_THROUGH'}
+
+        # Only react to timer events
+        if event.type != 'TIMER':
+            return {'PASS_THROUGH'}
+
+        # Force update status bar
+        bpy.context.workspace.status_text_set_internal(None)
+
+        # Check if thread is still running
+        if self.thread.is_alive():
+            return {'PASS_THROUGH'}
+
+        # Release the thread
+        self.thread.join()
+        # Remove the timer
+        context.window_manager.event_timer_remove(self._timer)
+        # Remove the progress bar from status bar
+        bpy.types.STATUSBAR_HT_header.remove(progress_bar)
+
+        # Check if thread was cancelled by user
+        if self.is_cancelled:
+            self.report({"WARNING"}, "Cancelled by user.")
+            return {'CANCELLED'}
+
+        # If not cancelled...
+        for key, value in self.meta.items():
+            print(f"{key}: {value}")
+
+        orig_shape = self.meta['shape']
+        orig_spacing = self.meta['spacing']
+        min_size = min(orig_spacing[0],
+                       orig_spacing[1], orig_spacing[2])
+        bioxel_size = max(min_size, 1.0)
+
+        layer_size = get_layer_size(orig_shape,
+                                    bioxel_size)
+        log10 = math.floor(math.log10(max(*layer_size)))
+        scene_scale = math.pow(10, -log10)
+
+        if self.container:
+            container = bpy.data.objects[self.container]
+            container_name = container.name
         else:
-            volume = np.transpose(volume)
-            volume = np.expand_dims(volume, axis=-1)
+            container_name = self.meta['name']

-    return volume, meta
+        bpy.ops.bioxelnodes.import_volumetric_data_dialog(
+            'INVOKE_DEFAULT',
+            filepath=self.filepath,
+            container_name=container_name,
+            layer_name=self.meta['description'],
+            orig_shape=orig_shape,
+            orig_spacing=orig_spacing,
+            bioxel_size=bioxel_size,
+            series_id=self.series_id or "",
+            frame_count=self.meta['frame_count'],
+            channel_count=self.meta['channel_count'],
+            container=self.container,
+            read_as=self.read_as,
+            scene_scale=scene_scale
+        )
+
+        self.report({"INFO"}, "Successfully Read.")
         return {'FINISHED'}

+    def invoke(self, context, event):
+        if not self.filepath and not self.directory:
+            return {'CANCELLED'}

-class ImportVolumeDataDialog(bpy.types.Operator):
-    bl_idname = "bioxelnodes.import_volume_data_dialog"
-    bl_label = "Volumetric Data as Bioxel Layer"
-    bl_description = "Import Volumetric Data as Bioxel Layer"
+        ext = get_ext(self.filepath)
+
+        # Series Selection
+        if ext in DICOM_EXTS:
+            dir_path = Path(self.filepath).parent
+            reader = sitk.ImageSeriesReader()
+            reader.MetaDataDictionaryArrayUpdateOn()
+            reader.LoadPrivateTagsOn()
+
+            series_ids = reader.GetGDCMSeriesIDs(str(dir_path))
+
+            for series_id in series_ids:
+                series_files = reader.GetGDCMSeriesFileNames(
+                    str(dir_path), series_id)
+                single = sitk.ImageFileReader()
+                single.SetFileName(series_files[0])
+                single.LoadPrivateTagsOn()
+                single.ReadImageInformation()
+
+                def get_meta(key):
+                    try:
+                        string = single.GetMetaData(key).removesuffix(" ")
+                        if string in ["No study description",
+                                      "No series description"]:
+                            return "Unknown"
+                        else:
+                            return string
+                    except:
+                        return "Unknown"
+
+                study_description = get_meta("0008|1030")
+                series_description = get_meta("0008|103e")
+                series_modality = get_meta("0008|0060")
+                size_x = get_meta("0028|0011")
+                size_y = get_meta("0028|0010")
+                count = get_meta("0020|0013")
+
+                # some series report an image count of 0, skip them
+                if int(count) == 0:
+                    continue
+
+                series_item = self.series_ids.add()
+                series_item.id = series_id
+                series_item.label = "{:<20} {:>1}".format(f"{study_description}>{series_description}({series_modality})",
+                                                          f"({size_x}x{size_y})x{count}")
+
+            if len(series_ids) > 1:
+                context.window_manager.invoke_props_dialog(self,
+                                                           width=400,
+                                                           title="Which series to import?")
+                return {'RUNNING_MODAL'}
+            else:
+                self.series_id = series_ids[0]
+
+        self.execute(context)
+        return {'RUNNING_MODAL'}
+
+    def draw(self, context):
+        layout = self.layout
+        layout.label(
+            text='Multiple series detected in DICOM, pick one')
+        layout.prop(self, "series_id")
+
+
+class ImportVolumetricDataDialog(bpy.types.Operator):
+    bl_idname = "bioxelnodes.import_volumetric_data_dialog"
+    bl_label = "Import Volumetric Data"
+    bl_description = "Import Volumetric Data as Layer"
     bl_options = {'UNDO'}

-    filepath: bpy.props.StringProperty(
-        subtype="FILE_PATH"
-    )  # type: ignore
+    layers = []
+    thread = None
+    _timer = None
+
+    filepath: bpy.props.StringProperty(subtype="FILE_PATH")  # type: ignore

     container_name: bpy.props.StringProperty(
-        name="Container Name"
-    )  # type: ignore
+        name="Container Name")  # type: ignore

-    layer_name: bpy.props.StringProperty(
-        name="Layer Name",
-    )  # type: ignore
+    layer_name: bpy.props.StringProperty(name="Layer Name")  # type: ignore

     series_id: bpy.props.StringProperty()  # type: ignore

     container: bpy.props.StringProperty()  # type: ignore

-    read_as: bpy.props.EnumProperty(
-        name="Read as",
-        default="scalar",
-        items=[("scalar", "Scalar", ""),
-               ("label", "Labels", "")]
-    )  # type: ignore
+    frame_count: bpy.props.IntProperty()  # type: ignore

-    bioxel_size: bpy.props.FloatProperty(
-        name="Bioxel Size (Larger size means small resolution)",
-        soft_min=0.1, soft_max=10.0,
-        min=0.1, max=1e2,
-        default=1,
-    )  # type: ignore
+    channel_count: bpy.props.IntProperty()  # type: ignore

-    orig_spacing: bpy.props.FloatVectorProperty(
-        name="Original Spacing",
-        default=(1, 1, 1),
-    )  # type: ignore
+    read_as: bpy.props.EnumProperty(name="Read as",
+                                    default="scalar",
+                                    items=[("scalar", "Scalar", ""),
+                                           ("label", "Labels", "")])  # type: ignore

-    orig_shape: bpy.props.IntVectorProperty(
-        name="Original Shape",
-        default=(100, 100, 100)
-    )  # type: ignore
+    bioxel_size: bpy.props.FloatProperty(name="Bioxel Size (Larger size means smaller resolution)",
+                                         soft_min=0.1, soft_max=10.0,
+                                         min=0.1, max=1e2,
+                                         default=1)  # type: ignore

-    scene_scale: bpy.props.FloatProperty(
-        name="Scene Scale (Bioxel Unit pre Blender Unit)",
-        soft_min=0.0001, soft_max=10.0,
-        min=1e-6, max=1e6,
-        default=0.01,
-    )  # type: ignore
+    orig_spacing: bpy.props.FloatVectorProperty(name="Original Spacing",
+                                                default=(1, 1, 1))  # type: ignore

-    is_time_sequence: bpy.props.BoolProperty(
-        name="Is Time Sequence",
-        default=False,
-    )  # type: ignore
+    orig_shape: bpy.props.IntVectorProperty(name="Original Shape",
+                                            default=(100, 100, 100))  # type: ignore

-    split_channels: bpy.props.BoolProperty(
-        name="Split Channels",
-        default=False,
-    )  # type: ignore
+    scene_scale: bpy.props.FloatProperty(name="Scene Scale (Bioxel Unit per Blender Unit)",
+                                         soft_min=0.0001, soft_max=10.0,
+                                         min=1e-6, max=1e6,
+                                         default=0.01)  # type: ignore
+
+    as_time_sequence: bpy.props.BoolProperty(name="As Time Sequence",
+                                             default=False)  # type: ignore
+
+    split_channels: bpy.props.BoolProperty(name="Split Channels",
+                                           default=False)  # type: ignore
bpy.props.BoolProperty(name="As Time Sequence", + default=False) # type: ignore + + split_channels: bpy.props.BoolProperty(name="Split Channels", + default=False) # type: ignore def execute(self, context): - is_first_import = len(get_all_layers()) == 0 - volume, meta = parse_volume_data(self.filepath) - container_name = self.container_name or meta['name'] or "Container" - bioxel_size = self.bioxel_size - orig_spacing = self.orig_spacing - - layer_spacing = ( - meta['spacing'][0] / orig_spacing[0] * bioxel_size, - meta['spacing'][1] / orig_spacing[1] * bioxel_size, - meta['spacing'][2] / orig_spacing[2] * bioxel_size - ) + def import_volumetric_data_func(self, context, cancel): + progress_update(context, 0.0, "Parsing Volumetirc Data...") - dtype_index = volume.dtype.num + def progress_callback(factor, text): + if cancel(): + return False + progress_update(context, factor*0.2, text) + return True - layer_shape = get_layer_shape( - bioxel_size, meta['shape'], orig_spacing) + try: + volume, meta = parse_volumetric_data(filepath=self.filepath, + series_id=self.series_id, + progress_callback=progress_callback) + except CancelledByUser: + return + + if cancel(): + return + + layer_shape = get_layer_shape(self.bioxel_size, + meta['shape'], + self.orig_spacing) + layer_dtype = volume.dtype.num + + # After sitk.DICOMOrient(), origin and direction will also orient base on LPS + # so we need to convert them into RAS + mat_lps2ras = axis_conversion(from_forward='-Z', + from_up='-Y', + to_forward='-Z', + to_up='Y').to_4x4() + + mat_location = mathutils.Matrix.Translation( + mathutils.Vector(meta['origin']) + ) - # After sitk.DICOMOrient(), origin and direction will also orient base on LPS - # so we need to convert them into RAS - mat_lps2ras = axis_conversion( - from_forward='-Z', - from_up='-Y', - to_forward='-Z', - to_up='Y' - ).to_4x4() + mat_rotation = mathutils.Matrix( + np.array(meta['direction']).reshape((3, 3)) + ).to_4x4() - mat_location = mathutils.Matrix.Translation( - mathutils.Vector(meta['origin']) - ) + mat_scale = mathutils.Matrix.Scale(self.bioxel_size, + 4) + + layer_transfrom = mat_lps2ras @ mat_location @ mat_rotation @ mat_scale \ + if meta['is_oriented'] else mat_location @ mat_rotation @ mat_scale + + def convert_to_vdb(volume, layer_shape, layer_type, progress_callback=None): + if self.as_time_sequence: + grids_sequence = [] + for f in range(volume.shape[0]): + if cancel(): + raise CancelledByUser + + print(f"Processing Frame {f+1}...") + if progress_callback: + progress_callback(f, volume.shape[0]) + frame = ski.resize(volume[f, :, :, :], + layer_shape, + preserve_range=True, + anti_aliasing=volume.dtype.kind != "b") + + grid = vdb.FloatGrid() + frame = frame.copy().astype(np.float32) + grid.copyFromArray(frame) + grid.transform = vdb.createLinearTransform( + layer_transfrom.transposed()) + grid.name = layer_type + grids_sequence.append([grid]) + + print(f"Saving the Cache...") + vdb_paths = save_vdbs(grids_sequence, context) + + else: + if cancel(): + raise CancelledByUser + print(f"Processing the Data...") + volume = ski.resize(volume, + layer_shape, + preserve_range=True, + anti_aliasing=volume.dtype.kind != "b") - mat_rotation = mathutils.Matrix( - np.array(meta['direction']).reshape((3, 3)) - ).to_4x4() + grid = vdb.FloatGrid() + volume = volume.copy().astype(np.float32) + grid.copyFromArray(volume) + grid.transform = vdb.createLinearTransform( + layer_transfrom.transposed()) + grid.name = layer_type - mat_scale = mathutils.Matrix.Scale( - bioxel_size, 4 - ) + 
print(f"Saving the Cache...") + vdb_paths = [save_vdb([grid], context)] + + return vdb_paths + + if cancel(): + return - transfrom = mat_lps2ras @ mat_location @ mat_rotation @ mat_scale \ - if meta['is_oriented'] else mat_location @ mat_rotation @ mat_scale + # change shape as sequence or not + if self.as_time_sequence: + if volume.shape[0] == 1: + # channel as frame + volume = volume.transpose(4, 1, 2, 3, 0) + + else: + volume = volume[0, :, :, :, :] + + layers = [] + + if self.read_as == "label": + layer_name = self.layer_name or "Label" + volume = np.amax(volume, -1) + volume = volume.astype(int) + orig_max = int(np.max(volume)) + orig_min = int(np.min(volume)) + progress_step = 0.7/orig_max + + for i in range(orig_max): + if cancel(): + return + + layer_name_i = f"{layer_name}_{i+1}" + progress = 0.2+i*progress_step + progress_update(context, progress, + f"Processing {layer_name_i}...") + + def progress_callback(frame, total): + sub_progress_step = progress_step/total + sub_progress = progress + frame * sub_progress_step + progress_update(context, sub_progress, + f"Processing {layer_name_i} Frame {frame+1}...") + + label = volume == np.full_like(volume, i+1) + try: + filepaths = convert_to_vdb(volume=label, + layer_shape=layer_shape, + layer_type="label", + progress_callback=progress_callback) + except CancelledByUser: + return + + layers.append({"name": layer_name_i, + "filepaths": filepaths, + "type": "label", + "shape": layer_shape, + "transfrom": layer_transfrom, + "dtype": layer_dtype, + "node_type": "BioxelNodes_MaskByLabel", + "scalar_offset": 0, + "orig_min": 0, + "orig_max": 1}) + + elif self.read_as == "scalar": + layer_name = self.layer_name or "Scalar" + # SHOULD NOT change any value! + volume = volume.astype(np.float32) + + if self.split_channels: + progress_step = 0.7/volume.shape[-1] + for i in range(volume.shape[-1]): + if cancel(): + return + + layer_name_i = f"{layer_name}_{i+1}" + progress = 0.2 + i*progress_step + + progress_update(context, progress, + f"Processing {layer_name_i}...") + + def progress_callback(frame, total): + sub_progress_step = progress_step/total + sub_progress = progress + frame * sub_progress_step + progress_update(context, sub_progress, + f"Processing {layer_name_i} Frame {frame+1}...") + + scalar = volume[:, :, :, :, i] \ + if self.as_time_sequence else volume[:, :, :, i] + orig_max = float(np.max(scalar)) + orig_min = float(np.min(scalar)) + + scalar_offset = 0 + if orig_min < 0: + scalar_offset = -orig_min + scalar = scalar + \ + np.full_like(scalar, scalar_offset) + try: + filepaths = convert_to_vdb(volume=scalar, + layer_shape=layer_shape, + layer_type="scalar", + progress_callback=progress_callback) + except CancelledByUser: + return + + layers.append({"name": layer_name_i, + "filepaths": filepaths, + "type": "scalar", + "shape": layer_shape, + "transfrom": layer_transfrom, + "dtype": layer_dtype, + "node_type": "BioxelNodes_MaskByThreshold", + "scalar_offset": scalar_offset, + "orig_min": orig_min, + "orig_max": orig_max}) + + else: + if cancel(): + return + + progress_update(context, 0.2, + f"Processing {layer_name}...") + volume = np.amax(volume, -1) + orig_max = float(np.max(volume)) + orig_min = float(np.min(volume)) + + scalar_offset = 0 + if orig_min < 0: + scalar_offset = -orig_min + volume = volume + np.full_like(volume, scalar_offset) + + def progress_callback(frame, total): + sub_progress_step = 0.7/total + sub_progress = 0.2 + frame * sub_progress_step + progress_update(context, sub_progress, + f"Processing {layer_name} 
Frame {frame+1}...")
+
+                try:
+                    filepaths = convert_to_vdb(volume=volume,
+                                               layer_shape=layer_shape,
+                                               layer_type="scalar",
+                                               progress_callback=progress_callback)
+                except CancelledByUser:
+                    return
+
+                layers.append({"name": layer_name,
+                               "filepaths": filepaths,
+                               "type": "scalar",
+                               "shape": layer_shape,
+                               "transfrom": layer_transfrom,
+                               "dtype": layer_dtype,
+                               "node_type": "BioxelNodes_MaskByThreshold",
+                               "scalar_offset": scalar_offset,
+                               "orig_min": orig_min,
+                               "orig_max": orig_max})
+
+            if cancel():
+                return
+
+            self.layers = layers
+            self.is_first_import = len(get_all_layers()) == 0
+            progress_update(context, 0.9, "Creating Layers...")

+        self.is_cancelled = False
+        self.thread = threading.Thread(target=import_volumetric_data_func,
+                                       args=(self, context, lambda: self.is_cancelled))
+
+        self.thread.start()
+        self._timer = context.window_manager.event_timer_add(time_step=0.1,
+                                                             window=context.window)
+        bpy.types.STATUSBAR_HT_header.append(progress_bar)
+
+        context.window_manager.modal_handler_add(self)
+        return {'RUNNING_MODAL'}
+
+    def modal(self, context, event):
+        if event.type == 'ESC':
+            self.is_cancelled = True
+            progress_update(context, 0, "Canceling...")
+            return {'PASS_THROUGH'}
+
+        if event.type != 'TIMER':
+            return {'PASS_THROUGH'}
+
+        bpy.context.workspace.status_text_set_internal(None)
+        if self.thread.is_alive():
+            return {'PASS_THROUGH'}
+
+        self.thread.join()
+        context.window_manager.event_timer_remove(self._timer)
+        bpy.types.STATUSBAR_HT_header.remove(progress_bar)
+
+        if self.is_cancelled:
+            self.report({"WARNING"}, "Cancelled by user.")
+            return {'CANCELLED'}

         # Wrapper a Container
         if not self.container:
-            # Make transformation
-            scene_scale = float(self.scene_scale)
+            container_name = self.container_name or "Container"
+
+            # Make transformation
             # (S)uperior -Z -> Y
             # (A)osterior Y -> Z
-            mat_ras2blender = axis_conversion(
-                from_forward='-Z',
-                from_up='Y',
-                to_forward='Y',
-                to_up='Z'
-            ).to_4x4()
+            mat_ras2blender = axis_conversion(from_forward='-Z',
+                                              from_up='Y',
+                                              to_forward='Y',
+                                              to_up='Z').to_4x4()

-            mat_scene_scale = mathutils.Matrix.Scale(
-                scene_scale, 4
-            )
+            mat_scene_scale = mathutils.Matrix.Scale(self.scene_scale,
+                                                     4)
+
+            bpy.ops.mesh.primitive_cube_add(enter_editmode=False,
+                                            align='WORLD',
+                                            location=(0, 0, 0),
+                                            scale=(1, 1, 1))

-            bpy.ops.mesh.primitive_cube_add(
-                enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)
-            )
             container = bpy.context.active_object

-            bbox_verts = calc_bbox_verts((0, 0, 0), layer_shape)
-            for index, vert in enumerate(container.data.vertices):
-                vert.co = transfrom @ mathutils.Vector(bbox_verts[index])
+            bbox_verts = calc_bbox_verts((0, 0, 0), self.layers[0]['shape'])
+            for i, vert in enumerate(container.data.vertices):
+                vert.co = self.layers[0]['transfrom'] @ mathutils.Vector(
+                    bbox_verts[i])

             container.matrix_world = mat_ras2blender @ mat_scene_scale
             container.name = container_name
             container.data.name = container_name
             container['bioxel_container'] = True
-            container['scene_scale'] = scene_scale

             bpy.ops.node.new_geometry_nodes_modifier()
             container_node_group = container.modifiers[0].node_group
             input_node = get_nodes_by_type(container_node_group,
                                            'NodeGroupInput')[0]
-            container_node_group.links.remove(input_node.outputs[0].links[0])
+            container_node_group.links.remove(
+                input_node.outputs[0].links[0])

         else:
             container = bpy.data.objects[self.container]
             container_node_group = container.modifiers[0].node_group

-        preferences = context.preferences.addons[__package__].preferences
-
-        # TODO: change to
transform when 4.2? - loc, rot, sca = transfrom.decompose() - layer_origin = tuple(loc) - layer_rotation = tuple(rot.to_euler()) - - def create_layer(volume, layer_name, layer_shape, layer_type="scalar"): - if volume.ndim == 4: - grids_sequence = [] - for f in range(volume.shape[0]): - print(f"Resampling...") - frame = ski.resize(volume[f, :, :, :], - layer_shape, - preserve_range=True, - anti_aliasing=volume.dtype.kind != "b") - - grid = vdb.FloatGrid() - frame = frame.copy().astype(np.float32) - grid.copyFromArray(frame) - grid.transform = vdb.createLinearTransform( - transfrom.transposed()) - grid.name = layer_type - grids_sequence.append([grid]) - - vdb_paths = save_vdbs(grids_sequence, context) - + for i, layer_info in enumerate(self.layers): + # Read VDB + print(f"Loading the Cache of {layer_info['name']}...") + filepaths = layer_info["filepaths"] + if len(filepaths) > 0: # Read VDB - print(f"Loading the cache to Blender scene...") - files = [{"name": str(vdb_path.name), "name": str(vdb_path.name)} - for vdb_path in vdb_paths] + files = [{"name": str(filepath.name), "name": str(filepath.name)} + for filepath in filepaths] - bpy.ops.object.volume_import(filepath=str(vdb_paths[0]), directory=str(vdb_paths[0].parent), + bpy.ops.object.volume_import(filepath=str(filepaths[0]), directory=str(filepaths[0].parent), files=files, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) - else: - volume = ski.resize(volume, - layer_shape, - preserve_range=True, - anti_aliasing=volume.dtype.kind != "b") - - grid = vdb.FloatGrid() - volume = volume.copy().astype(np.float32) - grid.copyFromArray(volume) - grid.transform = vdb.createLinearTransform( - transfrom.transposed()) - grid.name = layer_type - - vdb_path = save_vdb([grid], context) - - # Read VDB - print(f"Loading the cache to Blender scene...") - bpy.ops.object.volume_import(filepath=str(vdb_path), + bpy.ops.object.volume_import(filepath=str(filepaths[0]), align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) layer = bpy.context.active_object layer.data.sequence_mode = 'REPEAT' # Set props to VDB object - layer.name = layer_name - layer.data.name = layer_name + layer.name = f"{container.name}_{layer_info['name']}" + layer.data.name = f"{container.name}_{layer_info['name']}" lock_transform(layer) hide_in_ray(layer) @@ -417,7 +745,7 @@ def create_layer(volume, layer_name, layer_shape, layer_type="scalar"): layer.data.display.density = 1e-05 layer['bioxel_layer'] = True - layer['bioxel_layer_type'] = layer_type + layer['bioxel_layer_type'] = layer_info['type'] layer.parent = container for collection in layer.users_collection: @@ -426,13 +754,14 @@ def create_layer(volume, layer_name, layer_shape, layer_type="scalar"): for collection in container.users_collection: collection.objects.link(layer) - print(f"Creating layer ...") + print(f"Creating Node for {layer_info['name']}...") bpy.ops.node.new_geometry_nodes_modifier() node_group = layer.modifiers[0].node_group input_node = get_nodes_by_type(node_group, 'NodeGroupInput')[0] - output_node = get_nodes_by_type(node_group, 'NodeGroupOutput')[0] + output_node = get_nodes_by_type( + node_group, 'NodeGroupOutput')[0] to_layer_node = custom_nodes.add_node(node_group, "BioxelNodes__ConvertToLayer") @@ -442,24 +771,30 @@ def create_layer(volume, layer_name, layer_shape, layer_type="scalar"): node_group.links.new(to_layer_node.outputs[0], output_node.inputs[0]) + # TODO: change to transform when 4.2? 
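+            # The ConvertToLayer node takes Origin and Rotation as separate
+            # inputs rather than a matrix, so the layer transform is
+            # decomposed here; its uniform scale is carried by the
+            # Bioxel Size input instead.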
+ loc, rot, sca = layer_info['transfrom'].decompose() + layer_origin = tuple(loc) + layer_rotation = tuple(rot.to_euler()) + # for compatibility to old vdb # to_layer_node.inputs['Not Transfromed'].default_value = True to_layer_node.inputs['Layer ID'].default_value = random.randint(-200000000, 200000000) - to_layer_node.inputs['Data Type'].default_value = dtype_index - to_layer_node.inputs['Bioxel Size'].default_value = bioxel_size - to_layer_node.inputs['Shape'].default_value = layer_shape + to_layer_node.inputs['Bioxel Size'].default_value = self.bioxel_size + to_layer_node.inputs['Data Type'].default_value = layer_info['dtype'] + to_layer_node.inputs['Shape'].default_value = layer_info['shape'] to_layer_node.inputs['Origin'].default_value = layer_origin to_layer_node.inputs['Rotation'].default_value = layer_rotation + to_layer_node.inputs['Scalar Offset'].default_value = layer_info['scalar_offset'] + to_layer_node.inputs['Scalar Min'].default_value = layer_info['orig_min'] + to_layer_node.inputs['Scalar Max'].default_value = layer_info['orig_max'] - move_node_between_nodes(to_layer_node, [input_node, output_node]) + move_node_between_nodes( + to_layer_node, [input_node, output_node]) - return layer - - def create_mask_node(layer, node_type, node_label, offset): mask_node = custom_nodes.add_node(container_node_group, - node_type) - mask_node.label = node_label + layer_info['node_type']) + mask_node.label = layer_info['name'] mask_node.inputs[0].default_value = layer # Connect to output if no output linked @@ -470,122 +805,41 @@ def create_mask_node(layer, node_type, node_label, offset): output_node.inputs[0]) move_node_to_node(mask_node, output_node, (-300, 0)) else: - move_node_to_node(mask_node, output_node, offset) - - # change shape as sequence or not - if self.is_time_sequence: - # 4->5 or 5->5 - if volume.ndim == 4: - # channel as frame - volume = volume.transpose(3, 0, 1, 2) - volume = np.expand_dims(volume, axis=-1) - - else: - # 4->4 or 5->4 - if volume.ndim == 5: - # select frame 0 - volume = volume[0, :, :, :, :] - - if self.read_as == "label": - layer_name = self.layer_name or "Label" - volume = np.amax(volume, -1) - volume = volume.astype(int) - orig_max = int(np.max(volume)) - orig_min = int(np.min(volume)) - - for i in range(orig_max): - label = volume == np.full_like(volume, i+1) - layer_name_i = f"{layer_name}_{i+1}" - - layer = create_layer(volume=label, - layer_name=f"{container_name}_{layer_name_i}", - layer_shape=layer_shape, - layer_type="label") - - mask_node = create_mask_node(layer=layer, - node_type='BioxelNodes_MaskByLabel', - node_label=layer_name_i, - offset=(0, -100 * (i+1))) - - else: - layer_name = self.layer_name or "Scalar" - # SHOULD NOT change any value! 
-            volume = volume.astype(np.float32)
-
-            if self.split_channels:
-                for i in range(volume.shape[-1]):
-                    scalar = volume[:, :, :, :,
-                                    0] if self.is_time_sequence else volume[:, :, :, i]
-                    orig_max = float(np.max(scalar))
-                    orig_min = float(np.min(scalar))
-
-                    scalar_offset = 0
-                    if orig_min < 0:
-                        scalar_offset = -orig_min
-                        scalar = scalar + np.full_like(scalar, scalar_offset)
-
-                    layer_name_i = f"{layer_name}_{i+1}"
-                    layer = create_layer(volume=scalar,
-                                         layer_name=f"{container_name}_{layer_name_i}",
-                                         layer_shape=layer_shape,
-                                         layer_type="scalar")
-
-                    layer_node_group = layer.modifiers[0].node_group
-                    to_layer_node = layer_node_group.nodes['BioxelNodes__ConvertToLayer']
-                    to_layer_node.inputs['Scalar Offset'].default_value = scalar_offset
-                    to_layer_node.inputs['Scalar Max'].default_value = orig_max
-                    to_layer_node.inputs['Scalar Min'].default_value = orig_min
-
-                    mask_node = create_mask_node(layer=layer,
-                                                 node_type='BioxelNodes_MaskByThreshold',
-                                                 node_label=layer_name_i,
-                                                 offset=(0, -100 * (i+1)))
-
-            else:
-                volume = np.amax(volume, -1)
-                orig_max = float(np.max(volume))
-                orig_min = float(np.min(volume))
-
-                scalar_offset = 0
-                if orig_min < 0:
-                    scalar_offset = -orig_min
-                    volume = volume + np.full_like(volume, scalar_offset)
-
-                layer = create_layer(volume=volume,
-                                     layer_name=f"{container_name}_{layer_name}",
-                                     layer_shape=layer_shape,
-                                     layer_type="scalar")
-
-                layer_node_group = layer.modifiers[0].node_group
-                to_layer_node = layer_node_group.nodes['BioxelNodes__ConvertToLayer']
-                to_layer_node.inputs['Scalar Offset'].default_value = scalar_offset
-                to_layer_node.inputs['Scalar Max'].default_value = orig_max
-                to_layer_node.inputs['Scalar Min'].default_value = orig_min
-
-                mask_node = create_mask_node(layer=layer,
-                                             node_type='BioxelNodes_MaskByThreshold',
-                                             node_label=layer_name,
-                                             offset=(0, -100))
+                move_node_to_node(mask_node, output_node, (0, -100 * (i+1)))

         select_object(container)

         # Change render setting for better result
-        if preferences.do_change_render_setting and is_first_import:
-            bpy.context.scene.render.engine = 'CYCLES'
+        preferences = context.preferences.addons[__package__].preferences
+
+        if preferences.do_change_render_setting and self.is_first_import:
+            if bpy.app.version < (4, 2, 0):
+                bpy.context.scene.render.engine = 'CYCLES'
+
             try:
                 bpy.context.scene.cycles.shading_system = True
                 bpy.context.scene.cycles.volume_bounces = 12
                 bpy.context.scene.cycles.transparent_max_bounces = 16
                 bpy.context.scene.cycles.volume_preview_step_rate = 10
                 bpy.context.scene.cycles.volume_step_rate = 10
+            except:
+                pass
+
+            try:
+                bpy.context.scene.eevee.use_taa_reprojection = False
                 bpy.context.scene.eevee.volumetric_tile_size = '2'
                 bpy.context.scene.eevee.volumetric_shadow_samples = 128
                 bpy.context.scene.eevee.volumetric_samples = 256
             except:
                 pass

-        self.report({"INFO"}, "Successfully Imported")
+            if bpy.app.version >= (4, 2, 0):
+                try:
+                    bpy.context.scene.eevee.use_volumetric_shadows = True
+                except:
+                    pass

+        self.report({"INFO"}, "Successfully Imported")
         return {'FINISHED'}

     def invoke(self, context, event):
@@ -593,8 +847,8 @@ def invoke(self, context, event):
             volume_dtype = "Label"
         elif self.read_as == "scalar":
             volume_dtype = "Scalar"
-        title = f"Import as **{volume_dtype}** (Add to Container: {self.container})" \
-            if self.container != "" else f"Import as **{volume_dtype}** (Init a Container)"
+        title = f"As {volume_dtype} Options (Add to Container: {self.container})" \
+            if self.container != "" else f"As {volume_dtype} Options (Init a Container)"
context.window_manager.invoke_props_dialog(self, width=500, title=title) @@ -604,17 +858,14 @@ def draw(self, context): layer_shape = get_layer_shape(self.bioxel_size, self.orig_shape, self.orig_spacing) - layer_size = get_layer_size(layer_shape, - self.bioxel_size, - self.scene_scale) + + orig_shape = tuple(self.orig_shape) bioxel_count = layer_shape[0] * layer_shape[1] * layer_shape[2] - layer_shape_text = f"Shape will be: {str(layer_shape)} {bioxel_count:,} " + layer_shape_text = f"Shape from {str(orig_shape)} to {str(layer_shape)}" if bioxel_count > 100000000: layer_shape_text += "**TOO LARGE!**" - layer_size_text = f"Size will be: ({layer_size[0]:.2f}, {layer_size[1]:.2f}, {layer_size[2]:.2f}) m" - layout = self.layout if self.container == "": layout.prop(self, "container_name") @@ -627,216 +878,33 @@ def draw(self, context): panel.label(text=layer_shape_text) panel = layout.box() - panel.prop(self, "is_time_sequence") + if self.as_time_sequence and self.frame_count == 1: + channel_count = 1 + frame_count = self.channel_count + else: + channel_count = self.channel_count + frame_count = self.frame_count + + import_channel = channel_count if self.split_channels or channel_count == 1 else "combined" + import_frame = frame_count if self.as_time_sequence else "1" + panel.prop(self, "as_time_sequence", + text=f"As Time Sequence (get {frame_count} frames, import {import_frame} frames)") if self.read_as == "scalar": - panel.prop(self, "split_channels") + panel.prop(self, "split_channels", + text=f"Split Channels (get {channel_count} channels, import {import_channel} channels)") if self.container == "": + layer_size = get_layer_size(layer_shape, + self.bioxel_size, + self.scene_scale) + layer_size_text = f"Size will be: ({layer_size[0]:.2f}, {layer_size[1]:.2f}, {layer_size[2]:.2f}) m" panel = layout.box() panel.prop(self, "scene_scale") panel.label(text=layer_size_text) -def get_series_ids(self, context): - items = [] - for index, series_id in enumerate(self.series_ids): - items.append(( - series_id.id, - series_id.label, - "", - index - )) - - return items - - -class ParseVolumeData(bpy.types.Operator): - bl_idname = "bioxelnodes.parse_volume_data" - bl_label = "Volumetric Data as Bioxel Layer" - bl_description = "Import Volumetric Data as Bioxel Layer" - bl_options = {'UNDO'} - - filepath: bpy.props.StringProperty(subtype="FILE_PATH") # type: ignore - directory: bpy.props.StringProperty(subtype='DIR_PATH') # type: ignore - container: bpy.props.StringProperty() # type: ignore - - read_as: bpy.props.EnumProperty( - name="Read as", - default="scalar", - items=[("scalar", "Scalar", ""), - ("label", "Labels", "")] - ) # type: ignore - - series_id: bpy.props.EnumProperty( - name="Select Series", - items=get_series_ids - ) # type: ignore - - series_ids: bpy.props.CollectionProperty( - type=BIOXELNODES_Series) # type: ignore - - def execute(self, context): - ext = get_ext(self.filepath) - if ext not in SUPPORT_EXTS: - self.report({"WARNING"}, "Not supported extension.") - return {'CANCELLED'} - - print("Collecting Meta Data...") - volume, meta = parse_volume_data(self.filepath) - - for key, value in meta.items(): - print(f"{key}: {value}") - - if self.read_as == "label": - not_int = volume.dtype.kind != "b" and volume.dtype.kind != "i" and volume.dtype.kind != "u" - too_large = np.max(volume) > 100 - - if not_int or too_large: - self.report( - {"WARNING"}, "This volume data does not looks like label, please check again.") - return {'CANCELLED'} - - # do_orient = ext not in SEQUENCE_EXTS or ext in 
DICOM_EXTS - - orig_shape = meta['shape'] - orig_spacing = meta['spacing'] - min_size = min(orig_spacing[0], orig_spacing[1], orig_spacing[2]) - bioxel_size = max(min_size, 1.0) - - layer_size = get_layer_size(orig_shape, - bioxel_size) - log10 = math.floor(math.log10(max(*layer_size))) - scene_scale = math.pow(10, -log10) - - if self.container: - container = bpy.data.objects[self.container] - container_name = container.name - else: - container_name = meta['name'] - - bpy.ops.bioxelnodes.import_volume_data_dialog( - 'INVOKE_DEFAULT', - filepath=self.filepath, - container_name=container_name, - orig_shape=orig_shape, - orig_spacing=orig_spacing, - bioxel_size=bioxel_size, - series_id=self.series_id or "", - # do_orient=do_orient, - container=self.container, - read_as=self.read_as, - scene_scale=scene_scale - ) - - self.report({"INFO"}, "Successfully Readed.") - - return {'FINISHED'} - - def modal(self, context, event): - self.execute(context) - return {'FINISHED'} - - def invoke(self, context, event): - if not self.filepath and not self.directory: - return {'CANCELLED'} - - show_message('Parsing volume data, it may take a while...', - 'Please be patient...') - - if get_ext(self.filepath) == '.dcm': - dir_path = Path(self.filepath).parent - reader = sitk.ImageSeriesReader() - reader.MetaDataDictionaryArrayUpdateOn() - reader.LoadPrivateTagsOn() - series_ids = reader.GetGDCMSeriesIDs(str(dir_path)) - - for _id in series_ids: - series_id = self.series_ids.add() - series_id.id = _id - series_id.label = _id - - if len(series_ids) > 1: - context.window_manager.invoke_props_dialog(self, width=400) - return {'RUNNING_MODAL'} - else: - self.series_id = series_ids[0] - - context.window_manager.modal_handler_add(self) - return {'RUNNING_MODAL'} - - def draw(self, context): - layout = self.layout - layout.prop(self, "series_id") - layout.label( - text="Reading image data, it may take a while...") - layout.label( - text="Please be patient...") - - -class ImportVolumeData(): - bl_options = {'UNDO'} - - filepath: bpy.props.StringProperty(subtype="FILE_PATH") # type: ignore - directory: bpy.props.StringProperty(subtype='DIR_PATH') # type: ignore - - read_as = "scalar" - - def execute(self, context): - containers = get_container_from_selection() - - if len(containers) > 0: - bpy.ops.bioxelnodes.parse_volume_data( - 'INVOKE_DEFAULT', - filepath=self.filepath, - directory=self.directory, - container=containers[0].name, - read_as=self.read_as - ) - else: - bpy.ops.bioxelnodes.parse_volume_data( - 'INVOKE_DEFAULT', - filepath=self.filepath, - directory=self.directory, - read_as=self.read_as - ) - - return {'FINISHED'} - - def invoke(self, context, event): - context.window_manager.fileselect_add(self) - return {'RUNNING_MODAL'} - - -class ImportAsScalarLayer(bpy.types.Operator, ImportVolumeData): - bl_idname = "bioxelnodes.import_as_scalar_layer" - bl_label = "Import as Scalar" - bl_description = "Import Volumetric Data to Container as Scalar" - read_as = "scalar" - - -class ImportAsLabelLayer(bpy.types.Operator, ImportVolumeData): - bl_idname = "bioxelnodes.import_as_label_layer" - bl_label = "Import as Label" - bl_description = "Import Volumetric Data to Container as Label" - read_as = "label" - - -try: - class BIOXELNODES_FH_ImportVolumeData(bpy.types.FileHandler): - bl_idname = "BIOXELNODES_FH_ImportVolumeData" - bl_label = "File handler for dicom import" - bl_import_operator = "bioxelnodes.parse_volume_data" - bl_file_extensions = ";".join(FH_EXTS) - - @classmethod - def poll_drop(cls, context): - return 
(context.area and context.area.type == 'VIEW_3D') -except: - ... - - -class ExportVolumeData(bpy.types.Operator): - bl_idname = "bioxelnodes.export_volume_data" +class ExportVolumetricData(bpy.types.Operator): + bl_idname = "bioxelnodes.export_volumetric_data" bl_label = "Export Layer as VDB" bl_description = "Export Layer as VDB" bl_options = {'UNDO'} diff --git a/bioxelnodes/menus.py b/bioxelnodes/menus.py index fd19074..4f70efa 100644 --- a/bioxelnodes/menus.py +++ b/bioxelnodes/menus.py @@ -3,7 +3,7 @@ from .utils import get_container_from_selection from .operators import (AddPieCutter, AddPlaneCutter, AddCylinderCutter, AddCubeCutter, AddSphereCutter, CombineLabels, ConvertToMesh, InvertScalar, FillByLabel, FillByThreshold, FillByRange, PickBboxWire, PickMesh, PickVolume) -from .io import ExportVolumeData, ImportAsLabelLayer, ImportAsScalarLayer +from .io import ExportVolumetricData, ImportAsLabelLayer, ImportAsScalarLayer from .save import CleanAllCaches, ReLinkNodes, SaveLayers, SaveStagedData @@ -115,7 +115,7 @@ def draw(self, context): layout.operator(FillByLabel.bl_idname) layout.operator(CombineLabels.bl_idname) layout.separator() - layout.operator(ExportVolumeData.bl_idname) + layout.operator(ExportVolumetricData.bl_idname) def TOPBAR_FILE_IMPORT(self, context): @@ -131,7 +131,7 @@ def TOPBAR_FILE_IMPORT(self, context): def TOPBAR_FILE_EXPORT(self, context): layout = self.layout layout.separator() - layout.operator(ExportVolumeData.bl_idname, + layout.operator(ExportVolumetricData.bl_idname, text="Bioxel Layer (.vdb)") @@ -171,8 +171,8 @@ def draw(self, context): def TOPBAR(self, context): layout = self.layout layout.menu(BioxelNodesTopbarMenu.bl_idname) - - + + def add(): bpy.types.TOPBAR_MT_file_import.append(TOPBAR_FILE_IMPORT) bpy.types.TOPBAR_MT_file_export.append(TOPBAR_FILE_EXPORT) diff --git a/bioxelnodes/operators.py b/bioxelnodes/operators.py index 5760139..cc78c7b 100644 --- a/bioxelnodes/operators.py +++ b/bioxelnodes/operators.py @@ -366,7 +366,7 @@ def execute(self, context): def invoke(self, context, event): base_layer = get_layer(bpy.context.active_object) scalar_min = get_layer_meta(base_layer, "Scalar Min") - self.fill_value = scalar_min + self.fill_value = min(scalar_min,0) context.window_manager.invoke_props_dialog(self, width=400) return {'RUNNING_MODAL'} @@ -449,7 +449,7 @@ def execute(self, context): def invoke(self, context, event): base_layer = get_layer(bpy.context.active_object) scalar_min = get_layer_meta(base_layer, "Scalar Min") - self.fill_value = scalar_min + self.fill_value = min(scalar_min,0) context.window_manager.invoke_props_dialog(self, width=400) return {'RUNNING_MODAL'} @@ -533,7 +533,7 @@ def execute(self, context): def invoke(self, context, event): base_layer = get_layer(bpy.context.active_object) scalar_min = get_layer_meta(base_layer, "Scalar Min") - self.fill_value = scalar_min + self.fill_value = min(scalar_min,0) context.window_manager.invoke_props_dialog(self, width=400) return {'RUNNING_MODAL'} diff --git a/bioxelnodes/parse.py b/bioxelnodes/parse.py new file mode 100644 index 0000000..1fdd732 --- /dev/null +++ b/bioxelnodes/parse.py @@ -0,0 +1,369 @@ +from pathlib import Path +import numpy as np + +from .exceptions import CancelledByUser +from .utils import get_text_index_str + +try: + import SimpleITK as sitk + from pyometiff import OMETIFFReader + import mrcfile +except: + ... 
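+# These imports are optional at module load: the add-on ships SimpleITK,
+# pyometiff and mrcfile as bundled wheels, and a reader that failed to
+# import only surfaces as a NameError when its format is actually used.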
+
+
+SUPPORT_EXTS = ['', '.dcm', '.DCM', '.DICOM', '.ima', '.IMA',
+                '.bmp', '.BMP',
+                '.PIC', '.pic',
+                '.gipl', '.gipl.gz',
+                '.jpg', '.JPG', '.jpeg', '.JPEG',
+                '.lsm', '.LSM',
+                '.tif', '.TIF', '.tiff', '.TIFF',
+                '.mnc', '.MNC',
+                '.mrc', '.rec',
+                '.mha', '.mhd',
+                '.hdf', '.h4', '.hdf4', '.he2', '.h5', '.hdf5', '.he5',
+                '.nia', '.nii', '.nii.gz', '.hdr', '.img', '.img.gz',
+                '.nrrd', '.nhdr',
+                '.png', '.PNG',
+                '.vtk',
+                '.ome.tiff', '.ome.tif',
+                '.mrc.gz', '.map', '.map.gz']
+
+OME_EXTS = ['.ome.tiff', '.ome.tif',
+            '.tif', '.TIF', '.tiff', '.TIFF']
+
+MRC_EXTS = ['.mrc', '.mrc.gz', '.map', '.map.gz']
+
+SEQUENCE_EXTS = ['.bmp', '.BMP',
+                 '.jpg', '.JPG', '.jpeg', '.JPEG',
+                 '.tif', '.TIF', '.tiff', '.TIFF',
+                 '.png', '.PNG']
+
+DICOM_EXTS = ['', '.dcm', '.DCM', '.DICOM']
+
+FH_EXTS = ['', '.dcm', '.DCM', '.DICOM', '.ima', '.IMA',
+           '.gipl', '.gipl.gz',
+           '.mnc', '.MNC',
+           '.mrc', '.rec',
+           '.mha', '.mhd',
+           '.nia', '.nii', '.nii.gz', '.hdr', '.img', '.img.gz',
+           '.hdf', '.h4', '.hdf4', '.he2', '.h5', '.hdf5', '.he5',
+           '.nrrd', '.nhdr',
+           '.vtk',
+           '.gz']
+
+
+def get_ext(filepath: str) -> str:
+    file_path = Path(filepath)
+    if file_path.name.endswith(".nii.gz"):
+        return ".nii.gz"
+    elif file_path.name.endswith(".img.gz"):
+        return ".img.gz"
+    elif file_path.name.endswith(".gipl.gz"):
+        return ".gipl.gz"
+    elif file_path.name.endswith(".ome.tiff"):
+        return ".ome.tiff"
+    elif file_path.name.endswith(".ome.tif"):
+        return ".ome.tif"
+    elif file_path.name.endswith(".mrc.gz"):
+        return ".mrc.gz"
+    elif file_path.name.endswith(".map.gz"):
+        return ".map.gz"
+    else:
+        return file_path.suffix
+
+
+def get_sequence_name(filepath: str) -> str:
+    ext = get_ext(filepath)
+    filename = Path(filepath).name.removesuffix(ext)
+    index: str = get_text_index_str(filename)
+    return filename.removesuffix(index)
+
+
+def get_sequence_index(filepath: str) -> int:
+    ext = get_ext(filepath)
+    filename = Path(filepath).name.removesuffix(ext)
+    index: str = get_text_index_str(filename)
+    return int(index) if index else 0
+
+
+def collect_sequence(filepath: str):
+    file_path = Path(filepath).resolve()
+
+    files = list(file_path.parent.iterdir())
+    file_dict = {}
+    for file in files:
+        if file.is_file() \
+                and get_ext(file_path) == get_ext(file) \
+                and get_sequence_name(file_path) == get_sequence_name(file):
+            file_dict[get_sequence_index(file)] = file
+
+    for key in file_dict.copy().keys():
+        if not file_dict.get(key+1) \
+                and not file_dict.get(key-1):
+            del file_dict[key]
+
+    file_dict = dict(sorted(file_dict.items()))
+    sequence = [str(f) for f in file_dict.values()]
+
+    if len(sequence) == 0:
+        sequence = [str(file_path)]
+
+    return sequence
+
+
+def parse_volumetric_data(filepath: str, series_id="", progress_callback=None):
+    """Parse any volumetric data to a numpy array with shape (T,X,Y,Z,C)
+
+    Args:
+        filepath (str): file path
+        series_id (str, optional): DICOM series id. Defaults to "".
+        progress_callback (callable, optional): called with (factor, text);
+            returning False cancels the parse. Defaults to None.
+
+    Returns:
+        tuple: (volume, meta) -- the volume as a 5D numpy array in
+            (T,X,Y,Z,C) order, and a dict with name, description, shape,
+            spacing, origin, direction, frame_count, channel_count and
+            is_oriented.
+    """
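+    # For example: a plain CT volume comes back as volume.shape ==
+    # (1, X, Y, Z, 1), while a 10-frame, 3-channel OME-TIFF comes back
+    # as (10, X, Y, Z, 3).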
+    ext = get_ext(filepath)
+
+    if progress_callback:
+        progressing = progress_callback(0, "Reading the Data...")
+        if not progressing:
+            raise CancelledByUser
+
+    is_sequence = False
+    if ext in SEQUENCE_EXTS:
+        sequence = collect_sequence(filepath)
+        if len(sequence) > 1:
+            is_sequence = True
+
+    if ext in MRC_EXTS and not is_sequence:
+        # TODO: much to do with mrc
+        with mrcfile.open(filepath) as mrc:
+            volume = mrc.data
+            # mrc.print_header()
+            # print(volume.shape)
+            # print(mrc.voxel_size)
+
+            if mrc.is_single_image():
+                volume = np.expand_dims(volume, axis=0)  # expand frame
+                volume = np.expand_dims(volume, axis=-1)  # expand Z
+                volume = np.expand_dims(volume, axis=-1)  # expand channel
+
+            elif mrc.is_image_stack():
+                volume = np.expand_dims(volume, axis=-1)  # expand Z
+                volume = np.expand_dims(volume, axis=-1)  # expand channel
+
+            elif mrc.is_volume():
+                volume = np.expand_dims(volume, axis=0)  # expand frame
+                volume = np.expand_dims(volume, axis=-1)  # expand channel
+
+            elif mrc.is_volume_stack():
+                volume = np.expand_dims(volume, axis=-1)  # expand channel
+
+        name = Path(filepath).name.removesuffix(ext).replace(" ", "-")
+        shape = volume.shape[1:4]
+        spacing = (mrc.voxel_size.x, mrc.voxel_size.y, mrc.voxel_size.z)
+        meta = {
+            "name": name,
+            "description": "",
+            "shape": shape,
+            "spacing": spacing,
+            "origin": (0, 0, 0),
+            "direction": (1, 0, 0, 0, 1, 0, 0, 0, 1),
+            "frame_count": volume.shape[0],
+            "channel_count": volume.shape[-1],
+            "is_oriented": False
+        }
+
+    elif ext in OME_EXTS and not is_sequence:
+        reader = OMETIFFReader(fpath=filepath)
+        ome_volume, metadata, xml_metadata = reader.read()
+
+        if progress_callback:
+            progressing = progress_callback(0.5, "Transpose to 'TXYZC'...")
+            if not progressing:
+                raise CancelledByUser
+
+        try:
+            # print(ome_volume.shape)
+            # for key in metadata:
+            #     print(f"{key},{metadata[key]}")
+            ome_order = metadata['DimOrder BF Array']
+        except:
+            ome_order = "TCZYX"
+
+        if ome_volume.ndim == 2:
+            ome_order = ome_order.replace("T", "")\
+                .replace("C", "").replace("Z", "")
+            bioxel_order = (ome_order.index('X'),
+                            ome_order.index('Y'))
+            volume = np.transpose(ome_volume, bioxel_order)
+            volume = np.expand_dims(volume, axis=0)  # expand frame
+            volume = np.expand_dims(volume, axis=-1)  # expand Z
+            volume = np.expand_dims(volume, axis=-1)  # expand channel
+
+        elif ome_volume.ndim == 3:
+            # -> XYZC
+            ome_order = ome_order.replace("T", "").replace("C", "")
+            bioxel_order = (ome_order.index('X'),
+                            ome_order.index('Y'),
+                            ome_order.index('Z'))
+            volume = np.transpose(ome_volume, bioxel_order)
+            volume = np.expand_dims(volume, axis=0)  # expand frame
+            volume = np.expand_dims(volume, axis=-1)  # expand channel
+        elif ome_volume.ndim == 4:
+            # -> XYZC
+            ome_order = ome_order.replace("T", "")
+            bioxel_order = (ome_order.index('X'),
+                            ome_order.index('Y'),
+                            ome_order.index('Z'),
+                            ome_order.index('C'))
+            volume = np.transpose(ome_volume, bioxel_order)
+            volume = np.expand_dims(volume, axis=0)  # expand frame
+        elif ome_volume.ndim == 5:
+            # -> TXYZC
+            bioxel_order = (ome_order.index('T'),
+                            ome_order.index('X'),
+                            ome_order.index('Y'),
+                            ome_order.index('Z'),
+                            ome_order.index('C'))
+            volume = np.transpose(ome_volume, bioxel_order)
+
+        shape = volume.shape[1:4]
+
+        try:
+            spacing = (metadata['PhysicalSizeX'],
+                       metadata['PhysicalSizeY'],
+                       metadata['PhysicalSizeZ'])
+        except:
+            spacing = (1, 1, 1)
+
+        name = Path(filepath).name.removesuffix(ext).replace(" ", "-")
+        meta = {
+            "name": name,
+            "description": "",
"shape": shape, + "spacing": spacing, + "origin": (0, 0, 0), + "direction": (1, 0, 0, 0, 1, 0, 0, 0, 1), + "frame_count": volume.shape[0], + "channel_count": volume.shape[-1], + "is_oriented": False + } + + else: + if ext in DICOM_EXTS: + dir_path = Path(filepath).resolve().parent + reader = sitk.ImageSeriesReader() + reader.MetaDataDictionaryArrayUpdateOn() + reader.LoadPrivateTagsOn() + series_files = reader.GetGDCMSeriesFileNames( + str(dir_path), series_id) + reader.SetFileNames(series_files) + + itk_volume = reader.Execute() + # for k in reader.GetMetaDataKeys(0): + # v = reader.GetMetaData(0, k) + # print(f'({k}) = = "{v}"') + + def get_meta(key): + try: + stirng = reader.GetMetaData(0, key).removesuffix(" ") + if stirng in ["No study description", + "No series description"]: + return None + else: + return stirng + except: + return None + + study_description = get_meta("0008|1030") + series_description = get_meta("0008|103e") + series_modality = get_meta("0008|0060") + + name = study_description or dir_path.name + if series_description and series_modality: + description = f"{series_description}-{series_modality}" + elif series_description: + description = series_description + elif series_modality: + description = series_modality + else: + description = "" + + name = name.replace(" ", "-") + description = description.replace(" ", "-") + + elif ext in SEQUENCE_EXTS: + itk_volume = sitk.ReadImage(sequence) + name = get_sequence_name(filepath).replace(" ", "-") + description = "" + else: + itk_volume = sitk.ReadImage(filepath) + name = Path(filepath).name.removesuffix(ext).replace(" ", "-") + description = "" + + # for key in itk_volume.GetMetaDataKeys(): + # print(f"{key},{itk_volume.GetMetaData(key)}") + + if progress_callback: + progressing = progress_callback(0.5, "Transpose to 'TXYZC'...") + if not progressing: + raise CancelledByUser + + if itk_volume.GetDimension() == 3: + itk_volume = sitk.DICOMOrient(itk_volume, 'RAS') + + volume = sitk.GetArrayFromImage(itk_volume) + + # transpose ijk to kji + if volume.ndim == 4: + volume = np.transpose(volume, (2, 1, 0, 3)) + else: + volume = np.transpose(volume) + volume = np.expand_dims(volume, axis=-1) # expend channel + + volume = np.expand_dims(volume, axis=0) # expend frame + + meta = { + "name": name, + "description": description, + "shape": tuple(itk_volume.GetSize()), + "spacing": tuple(itk_volume.GetSpacing()), + "origin": tuple(itk_volume.GetOrigin()), + "direction": tuple(itk_volume.GetDirection()), + "frame_count": volume.shape[0], + "channel_count": volume.shape[-1], + "is_oriented": True + } + + if itk_volume.GetDimension() == 4: + # FIXME: not sure... 
+ direction = np.array(itk_volume.GetDirection()) + direction = direction.reshape(3, 3) if itk_volume.GetDimension() == 3 \ + else direction.reshape(4, 4) + + direction = direction[1:, 1:] + direction = tuple(direction.flatten()) + + volume = sitk.GetArrayFromImage(itk_volume) + + if volume.ndim == 5: + volume = np.transpose(volume, (0, 3, 2, 1, 4)) + else: + volume = np.transpose(volume, (0, 3, 2, 1)) + volume = np.expand_dims(volume, axis=-1) + + meta = { + "name": name, + "description": description, + "shape": tuple(itk_volume.GetSize()[:3]), + "spacing": tuple(itk_volume.GetSpacing()[:3]), + "origin": tuple(itk_volume.GetOrigin()[:3]), + "direction": direction, + "frame_count": volume.shape[0], + "channel_count": volume.shape[-1], + "is_oriented": False + } + + return volume, meta diff --git a/bioxelnodes/requirements.txt b/bioxelnodes/requirements.txt index 2a2ba13..d46025a 100644 --- a/bioxelnodes/requirements.txt +++ b/bioxelnodes/requirements.txt @@ -1 +1,3 @@ -SimpleITK==2.3.1 # Insight Segmentation and Registration Toolkit. \ No newline at end of file +SimpleITK==2.3.1 # Insight Segmentation and Registration Toolkit. +pyometiff==1.0.0 # .ome.tiff +mrcfile==1.5.1 # .map \ No newline at end of file diff --git a/bioxelnodes/utils.py b/bioxelnodes/utils.py index 9879697..e1b0a76 100644 --- a/bioxelnodes/utils.py +++ b/bioxelnodes/utils.py @@ -72,12 +72,19 @@ def get_nodes_by_type(node_group, type_name: str): return [node for node in node_group.nodes if get_node_type(node) == type_name] -def show_message(message="", title="Message Box", icon='INFO'): +def progress_bar(self, context): + row = self.layout.row() + row.progress( + factor=context.window_manager.bioxelnodes_progress_factor, + type="BAR", + text=context.window_manager.bioxelnodes_progress_text + ) + row.scale_x = 2 - def draw(self, context): - self.layout.label(text=message) - bpy.context.window_manager.popup_menu(draw, title=title, icon=icon) +def progress_update(context, factor, text=""): + context.window_manager.bioxelnodes_progress_factor = factor + context.window_manager.bioxelnodes_progress_text = text def calc_bbox_verts(origin, size): @@ -157,7 +164,7 @@ def save_vdb(grids, context): cache_dir.mkdir(parents=True, exist_ok=True) vdb_path = Path(cache_dir, f"{uuid4()}.vdb") - print(f"Storing the VDB file ({str(vdb_path)})...") + # print(f"Storing the VDB file ({str(vdb_path)})...") vdb.write(str(vdb_path), grids=grids) return vdb_path @@ -174,7 +181,7 @@ def save_vdbs(grids_sequence, context): vdb_paths = [] for f, grids in enumerate(grids_sequence): vdb_path = Path(vdb_dir_path, f"{vdb_name}.{str(f+1).zfill(4)}.vdb") - print(f"Storing the VDB file ({str(vdb_path)})...") + # print(f"Storing the VDB file ({str(vdb_path)})...") vdb.write(str(vdb_path), grids=grids) vdb_paths.append(vdb_path) diff --git a/extension/__init__.py b/extension/__init__.py index 6be166f..6e3ec9f 100644 --- a/extension/__init__.py +++ b/extension/__init__.py @@ -9,6 +9,8 @@ def register(): auto_load.register() + bpy.types.WindowManager.bioxelnodes_progress_factor = bpy.props.FloatProperty() + bpy.types.WindowManager.bioxelnodes_progress_text = bpy.props.StringProperty() menus.add() diff --git a/extension/blender_manifest.toml b/extension/blender_manifest.toml index 74ffae5..9fb2799 100644 --- a/extension/blender_manifest.toml +++ b/extension/blender_manifest.toml @@ -3,7 +3,7 @@ schema_version = "1.0.0" # Example of manifest file for a Blender extension # Change the values according to your extension id = "bioxelnodes" -version = "0.2.7" 
+version = "0.2.8" name = "Bioxel Nodes" tagline = "For scientific volumetric data visualization in Blender" maintainer = "Ma Nan " @@ -15,7 +15,7 @@ website = "https://omoolab.github.io/BioxelNodes/latest" # Optional list defined by Blender and server, see: # https://docs.blender.org/manual/en/dev/advanced/extensions/tags.html -tags = ["Geometry Nodes", "Render"] +tags = ["Geometry Nodes", "Render", "Import-Export"] blender_version_min = "4.2.0" # Optional: maximum supported Blender version @@ -38,7 +38,11 @@ platforms = ["windows-x64"] # Optional: bundle 3rd party Python modules. # https://docs.blender.org/manual/en/dev/advanced/extensions/python_wheels.html wheels = [ - "./wheels/SimpleITK-2.3.1-cp311-cp311-win_amd64.whl" + "./wheels/SimpleITK-2.3.1-cp311-cp311-win_amd64.whl", + "./wheels/lxml-5.2.2-cp311-cp311-win_amd64.whl", + "./wheels/tifffile-2024.7.21-py3-none-any.whl", + "./wheels/pyometiff-1.0.0-py3-none-any.whl", + "./wheels/mrcfile-1.5.1-py2.py3-none-any.whl" ] diff --git a/mkdocs.yml b/mkdocs.yml index 724a76d..7781f0d 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -13,7 +13,7 @@ nav: - Prepare Your Data: prepare_data.md - Step by Step: step_by_step.md - Concepts & Pipeline: concept.md - - Visualizating any Bio-structure: + - Visualizing any Bio-structure: - Import & Preprocess: import.md - Mask Methods: mask.md - Assgin a Shader: shader.md diff --git a/poetry.lock b/poetry.lock index bb06e23..779500f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -539,6 +539,49 @@ files = [ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] +[[package]] +name = "imagecodecs" +version = "2024.6.1" +description = "Image transformation, compression, and decompression codecs" +optional = false +python-versions = ">=3.9" +files = [ + {file = "imagecodecs-2024.6.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:369816eaddfe6e9d8d1faa6794341c89f3494fef846c9b5d834f77dc7583bfdf"}, + {file = "imagecodecs-2024.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9300198b5e4ec09f94f5d6bdd6f727d02bbccba5ed0c974e9931d3f9d5d7fa35"}, + {file = "imagecodecs-2024.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42eafc88b3dd44cfb7a8b076ff64ff794874e88d45c3691b32e9e93fbc42e86e"}, + {file = "imagecodecs-2024.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f428f141ed102cecf98ffbe61ed02b1f3f7d65db0bf1459cf2e7b1a53fb279b2"}, + {file = "imagecodecs-2024.6.1-cp310-cp310-win32.whl", hash = "sha256:5526a7e41939613a5c68403911f7cc738f6fe1a8ac0456535720f53253497b76"}, + {file = "imagecodecs-2024.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:8d3d82ebe83a3e31ab7f09a8b72155c5436a3f87145052ca57dc5caf3a2dc9c0"}, + {file = "imagecodecs-2024.6.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:328ea133e0f292cf54c5feb13e247fbf45a6055c8dc6822e841c208d2dc5c96a"}, + {file = "imagecodecs-2024.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8045ea3a9c9de78ea00e2a387f47d784434bfad05967decbe0c1b3bee5aadf25"}, + {file = "imagecodecs-2024.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42bd9ec14e4d38f15e2fa387c90b726dba42c16da0a9b6ff2c23e01478b8cd93"}, + {file = "imagecodecs-2024.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eb5b2d755a64de9a7e0604d5dcc1151c96b43b4e5ac69bebc6d8d790b77ca58"}, + {file = "imagecodecs-2024.6.1-cp311-cp311-win32.whl", hash = "sha256:03ace438a843e024239cddbe7fe6940bd2a6cf3316b08c281b95842b5217c0f7"}, + 
{file = "imagecodecs-2024.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:cd926589c6e3c564490b93258b1a2ca3b040da10c21e99b618b7be6dd76b2a25"}, + {file = "imagecodecs-2024.6.1-cp311-cp311-win_arm64.whl", hash = "sha256:101fcef57aedb8730d1d2d1779dfbaa23daf7e50cd4130e88945a4fe34d0212f"}, + {file = "imagecodecs-2024.6.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:3c5e0ebdf7e1f8ec23a6d3c4b06fc7a64f41ec47ba23516458c5a763685f29e3"}, + {file = "imagecodecs-2024.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:407d2859d62ed5834e69e74d9ebcbc2d30be71e4f1ee14fae37f1179110fec8c"}, + {file = "imagecodecs-2024.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066aab64446a1d87271c8036a3d9f03dfac8678993e4e1e97923acd0d10f355"}, + {file = "imagecodecs-2024.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3467f4929fe0159c91e59a1f186a656e3aa3ad330079ab3af3d7edff7603b82"}, + {file = "imagecodecs-2024.6.1-cp312-cp312-win32.whl", hash = "sha256:a9b4939934bde291f5b107fcc01dbd6d4b4307eb36915c880600592839cab682"}, + {file = "imagecodecs-2024.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:35ddab6947bcf4c04bc0e5d171769c40ffdea07eb908e62de53d2dde3985d59d"}, + {file = "imagecodecs-2024.6.1-cp312-cp312-win_arm64.whl", hash = "sha256:0bd70e34ff9b14ea299c1cdc51db4a80c2c406ae2f422e5e400716e8df791bdc"}, + {file = "imagecodecs-2024.6.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:3d39699ddf13d8ce67b6a1a04e92a9c318613c0eecc245861590fed78f09e2a1"}, + {file = "imagecodecs-2024.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:421be324c4d0578ae1be6b75ebddf1cbe4e8092a83d31d2a8fa8021bc75e12d2"}, + {file = "imagecodecs-2024.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ababc300ec18c28e8fd515ad92252679742c243e88cdb7c5e217c72eaed8fa3a"}, + {file = "imagecodecs-2024.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc50b613738a3b62aa77c00d457a9e9a6337e9fb7a2b8163d99a368d4a346f8d"}, + {file = "imagecodecs-2024.6.1-cp39-cp39-win32.whl", hash = "sha256:38c6a929ca5356ab9ffdd4aa6dcae5156a7265f886b662bd8cfb0bca1e3d6bee"}, + {file = "imagecodecs-2024.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:01e41f59ebb7b09dc965cafd264cab0ee303d3cef981ecceb85bb556b933a8f3"}, + {file = "imagecodecs-2024.6.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:20d66ea962e9e6ea690a89f56ff9da7831378990d705c1fb133cddac2f2c507a"}, + {file = "imagecodecs-2024.6.1.tar.gz", hash = "sha256:0f3e94b7f51e2f78287b7ffae82cd850b1007639148894538274fa50bd179886"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +all = ["matplotlib", "numcodecs", "tifffile"] + [[package]] name = "imageio" version = "2.34.2" @@ -784,6 +827,164 @@ dev = ["changelist (==0.5)"] lint = ["pre-commit (==3.7.0)"] test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] +[[package]] +name = "lxml" +version = "5.2.2" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:364d03207f3e603922d0d3932ef363d55bbf48e3647395765f9bfcbdf6d23632"}, + {file = "lxml-5.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50127c186f191b8917ea2fb8b206fbebe87fd414a6084d15568c27d0a21d60db"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74e4f025ef3db1c6da4460dd27c118d8cd136d0391da4e387a15e48e5c975147"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:981a06a3076997adf7c743dcd0d7a0415582661e2517c7d961493572e909aa1d"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aef5474d913d3b05e613906ba4090433c515e13ea49c837aca18bde190853dff"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e275ea572389e41e8b039ac076a46cb87ee6b8542df3fff26f5baab43713bca"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5b65529bb2f21ac7861a0e94fdbf5dc0daab41497d18223b46ee8515e5ad297"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bcc98f911f10278d1daf14b87d65325851a1d29153caaf146877ec37031d5f36"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_ppc64le.whl", hash = "sha256:b47633251727c8fe279f34025844b3b3a3e40cd1b198356d003aa146258d13a2"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_s390x.whl", hash = "sha256:fbc9d316552f9ef7bba39f4edfad4a734d3d6f93341232a9dddadec4f15d425f"}, + {file = "lxml-5.2.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:13e69be35391ce72712184f69000cda04fc89689429179bc4c0ae5f0b7a8c21b"}, + {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b6a30a9ab040b3f545b697cb3adbf3696c05a3a68aad172e3fd7ca73ab3c835"}, + {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:a233bb68625a85126ac9f1fc66d24337d6e8a0f9207b688eec2e7c880f012ec0"}, + {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:dfa7c241073d8f2b8e8dbc7803c434f57dbb83ae2a3d7892dd068d99e96efe2c"}, + {file = "lxml-5.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1a7aca7964ac4bb07680d5c9d63b9d7028cace3e2d43175cb50bba8c5ad33316"}, + {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ae4073a60ab98529ab8a72ebf429f2a8cc612619a8c04e08bed27450d52103c0"}, + {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ffb2be176fed4457e445fe540617f0252a72a8bc56208fd65a690fdb1f57660b"}, + {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e290d79a4107d7d794634ce3e985b9ae4f920380a813717adf61804904dc4393"}, + {file = "lxml-5.2.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:96e85aa09274955bb6bd483eaf5b12abadade01010478154b0ec70284c1b1526"}, + {file = "lxml-5.2.2-cp310-cp310-win32.whl", hash = "sha256:f956196ef61369f1685d14dad80611488d8dc1ef00be57c0c5a03064005b0f30"}, + {file = "lxml-5.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:875a3f90d7eb5c5d77e529080d95140eacb3c6d13ad5b616ee8095447b1d22e7"}, + {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45f9494613160d0405682f9eee781c7e6d1bf45f819654eb249f8f46a2c22545"}, + {file = "lxml-5.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0b3f2df149efb242cee2ffdeb6674b7f30d23c9a7af26595099afaf46ef4e88"}, + {file = 
"lxml-5.2.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d28cb356f119a437cc58a13f8135ab8a4c8ece18159eb9194b0d269ec4e28083"}, + {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:657a972f46bbefdbba2d4f14413c0d079f9ae243bd68193cb5061b9732fa54c1"}, + {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b9ea10063efb77a965a8d5f4182806fbf59ed068b3c3fd6f30d2ac7bee734"}, + {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07542787f86112d46d07d4f3c4e7c760282011b354d012dc4141cc12a68cef5f"}, + {file = "lxml-5.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:303f540ad2dddd35b92415b74b900c749ec2010e703ab3bfd6660979d01fd4ed"}, + {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2eb2227ce1ff998faf0cd7fe85bbf086aa41dfc5af3b1d80867ecfe75fb68df3"}, + {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_ppc64le.whl", hash = "sha256:1d8a701774dfc42a2f0b8ccdfe7dbc140500d1049e0632a611985d943fcf12df"}, + {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_s390x.whl", hash = "sha256:56793b7a1a091a7c286b5f4aa1fe4ae5d1446fe742d00cdf2ffb1077865db10d"}, + {file = "lxml-5.2.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:eb00b549b13bd6d884c863554566095bf6fa9c3cecb2e7b399c4bc7904cb33b5"}, + {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a2569a1f15ae6c8c64108a2cd2b4a858fc1e13d25846be0666fc144715e32ab"}, + {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:8cf85a6e40ff1f37fe0f25719aadf443686b1ac7652593dc53c7ef9b8492b115"}, + {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:d237ba6664b8e60fd90b8549a149a74fcc675272e0e95539a00522e4ca688b04"}, + {file = "lxml-5.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0b3f5016e00ae7630a4b83d0868fca1e3d494c78a75b1c7252606a3a1c5fc2ad"}, + {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23441e2b5339bc54dc949e9e675fa35efe858108404ef9aa92f0456929ef6fe8"}, + {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2fb0ba3e8566548d6c8e7dd82a8229ff47bd8fb8c2da237607ac8e5a1b8312e5"}, + {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:79d1fb9252e7e2cfe4de6e9a6610c7cbb99b9708e2c3e29057f487de5a9eaefa"}, + {file = "lxml-5.2.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6dcc3d17eac1df7859ae01202e9bb11ffa8c98949dcbeb1069c8b9a75917e01b"}, + {file = "lxml-5.2.2-cp311-cp311-win32.whl", hash = "sha256:4c30a2f83677876465f44c018830f608fa3c6a8a466eb223535035fbc16f3438"}, + {file = "lxml-5.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:49095a38eb333aaf44c06052fd2ec3b8f23e19747ca7ec6f6c954ffea6dbf7be"}, + {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7429e7faa1a60cad26ae4227f4dd0459efde239e494c7312624ce228e04f6391"}, + {file = "lxml-5.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:50ccb5d355961c0f12f6cf24b7187dbabd5433f29e15147a67995474f27d1776"}, + {file = "lxml-5.2.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc911208b18842a3a57266d8e51fc3cfaccee90a5351b92079beed912a7914c2"}, + {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33ce9e786753743159799fdf8e92a5da351158c4bfb6f2db0bf31e7892a1feb5"}, + {file = 
"lxml-5.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec87c44f619380878bd49ca109669c9f221d9ae6883a5bcb3616785fa8f94c97"}, + {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08ea0f606808354eb8f2dfaac095963cb25d9d28e27edcc375d7b30ab01abbf6"}, + {file = "lxml-5.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75a9632f1d4f698b2e6e2e1ada40e71f369b15d69baddb8968dcc8e683839b18"}, + {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74da9f97daec6928567b48c90ea2c82a106b2d500f397eeb8941e47d30b1ca85"}, + {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_ppc64le.whl", hash = "sha256:0969e92af09c5687d769731e3f39ed62427cc72176cebb54b7a9d52cc4fa3b73"}, + {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_s390x.whl", hash = "sha256:9164361769b6ca7769079f4d426a41df6164879f7f3568be9086e15baca61466"}, + {file = "lxml-5.2.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d26a618ae1766279f2660aca0081b2220aca6bd1aa06b2cf73f07383faf48927"}, + {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab67ed772c584b7ef2379797bf14b82df9aa5f7438c5b9a09624dd834c1c1aaf"}, + {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:3d1e35572a56941b32c239774d7e9ad724074d37f90c7a7d499ab98761bd80cf"}, + {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:8268cbcd48c5375f46e000adb1390572c98879eb4f77910c6053d25cc3ac2c67"}, + {file = "lxml-5.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e282aedd63c639c07c3857097fc0e236f984ceb4089a8b284da1c526491e3f3d"}, + {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfdc2bfe69e9adf0df4915949c22a25b39d175d599bf98e7ddf620a13678585"}, + {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4aefd911793b5d2d7a921233a54c90329bf3d4a6817dc465f12ffdfe4fc7b8fe"}, + {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8df03a9e995b6211dafa63b32f9d405881518ff1ddd775db4e7b98fb545e1c"}, + {file = "lxml-5.2.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f11ae142f3a322d44513de1018b50f474f8f736bc3cd91d969f464b5bfef8836"}, + {file = "lxml-5.2.2-cp312-cp312-win32.whl", hash = "sha256:16a8326e51fcdffc886294c1e70b11ddccec836516a343f9ed0f82aac043c24a"}, + {file = "lxml-5.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:bbc4b80af581e18568ff07f6395c02114d05f4865c2812a1f02f2eaecf0bfd48"}, + {file = "lxml-5.2.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e3d9d13603410b72787579769469af730c38f2f25505573a5888a94b62b920f8"}, + {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38b67afb0a06b8575948641c1d6d68e41b83a3abeae2ca9eed2ac59892b36706"}, + {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c689d0d5381f56de7bd6966a4541bff6e08bf8d3871bbd89a0c6ab18aa699573"}, + {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:cf2a978c795b54c539f47964ec05e35c05bd045db5ca1e8366988c7f2fe6b3ce"}, + {file = "lxml-5.2.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:739e36ef7412b2bd940f75b278749106e6d025e40027c0b94a17ef7968d55d56"}, + {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d8bbcd21769594dbba9c37d3c819e2d5847656ca99c747ddb31ac1701d0c0ed9"}, + {file = "lxml-5.2.2-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = 
"sha256:2304d3c93f2258ccf2cf7a6ba8c761d76ef84948d87bf9664e14d203da2cd264"}, + {file = "lxml-5.2.2-cp36-cp36m-win32.whl", hash = "sha256:02437fb7308386867c8b7b0e5bc4cd4b04548b1c5d089ffb8e7b31009b961dc3"}, + {file = "lxml-5.2.2-cp36-cp36m-win_amd64.whl", hash = "sha256:edcfa83e03370032a489430215c1e7783128808fd3e2e0a3225deee278585196"}, + {file = "lxml-5.2.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:28bf95177400066596cdbcfc933312493799382879da504633d16cf60bba735b"}, + {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a745cc98d504d5bd2c19b10c79c61c7c3df9222629f1b6210c0368177589fb8"}, + {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b590b39ef90c6b22ec0be925b211298e810b4856909c8ca60d27ffbca6c12e6"}, + {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b336b0416828022bfd5a2e3083e7f5ba54b96242159f83c7e3eebaec752f1716"}, + {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:c2faf60c583af0d135e853c86ac2735ce178f0e338a3c7f9ae8f622fd2eb788c"}, + {file = "lxml-5.2.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:4bc6cb140a7a0ad1f7bc37e018d0ed690b7b6520ade518285dc3171f7a117905"}, + {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7ff762670cada8e05b32bf1e4dc50b140790909caa8303cfddc4d702b71ea184"}, + {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57f0a0bbc9868e10ebe874e9f129d2917750adf008fe7b9c1598c0fbbfdde6a6"}, + {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:a6d2092797b388342c1bc932077ad232f914351932353e2e8706851c870bca1f"}, + {file = "lxml-5.2.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:60499fe961b21264e17a471ec296dcbf4365fbea611bf9e303ab69db7159ce61"}, + {file = "lxml-5.2.2-cp37-cp37m-win32.whl", hash = "sha256:d9b342c76003c6b9336a80efcc766748a333573abf9350f4094ee46b006ec18f"}, + {file = "lxml-5.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b16db2770517b8799c79aa80f4053cd6f8b716f21f8aca962725a9565ce3ee40"}, + {file = "lxml-5.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7ed07b3062b055d7a7f9d6557a251cc655eed0b3152b76de619516621c56f5d3"}, + {file = "lxml-5.2.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f60fdd125d85bf9c279ffb8e94c78c51b3b6a37711464e1f5f31078b45002421"}, + {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a7e24cb69ee5f32e003f50e016d5fde438010c1022c96738b04fc2423e61706"}, + {file = "lxml-5.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23cfafd56887eaed93d07bc4547abd5e09d837a002b791e9767765492a75883f"}, + {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19b4e485cd07b7d83e3fe3b72132e7df70bfac22b14fe4bf7a23822c3a35bff5"}, + {file = "lxml-5.2.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7ce7ad8abebe737ad6143d9d3bf94b88b93365ea30a5b81f6877ec9c0dee0a48"}, + {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e49b052b768bb74f58c7dda4e0bdf7b79d43a9204ca584ffe1fb48a6f3c84c66"}, + {file = "lxml-5.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d14a0d029a4e176795cef99c056d58067c06195e0c7e2dbb293bf95c08f772a3"}, + {file = "lxml-5.2.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:be49ad33819d7dcc28a309b86d4ed98e1a65f3075c6acd3cd4fe32103235222b"}, + {file = 
"lxml-5.2.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a6d17e0370d2516d5bb9062c7b4cb731cff921fc875644c3d751ad857ba9c5b1"}, + {file = "lxml-5.2.2-cp38-cp38-win32.whl", hash = "sha256:5b8c041b6265e08eac8a724b74b655404070b636a8dd6d7a13c3adc07882ef30"}, + {file = "lxml-5.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61efaf4bed1cc0860e567d2ecb2363974d414f7f1f124b1df368bbf183453a6"}, + {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb91819461b1b56d06fa4bcf86617fac795f6a99d12239fb0c68dbeba41a0a30"}, + {file = "lxml-5.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d4ed0c7cbecde7194cd3228c044e86bf73e30a23505af852857c09c24e77ec5d"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54401c77a63cc7d6dc4b4e173bb484f28a5607f3df71484709fe037c92d4f0ed"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:625e3ef310e7fa3a761d48ca7ea1f9d8718a32b1542e727d584d82f4453d5eeb"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:519895c99c815a1a24a926d5b60627ce5ea48e9f639a5cd328bda0515ea0f10c"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c7079d5eb1c1315a858bbf180000757db8ad904a89476653232db835c3114001"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:343ab62e9ca78094f2306aefed67dcfad61c4683f87eee48ff2fd74902447726"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:cd9e78285da6c9ba2d5c769628f43ef66d96ac3085e59b10ad4f3707980710d3"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_ppc64le.whl", hash = "sha256:546cf886f6242dff9ec206331209db9c8e1643ae642dea5fdbecae2453cb50fd"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_s390x.whl", hash = "sha256:02f6a8eb6512fdc2fd4ca10a49c341c4e109aa6e9448cc4859af5b949622715a"}, + {file = "lxml-5.2.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:339ee4a4704bc724757cd5dd9dc8cf4d00980f5d3e6e06d5847c1b594ace68ab"}, + {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0a028b61a2e357ace98b1615fc03f76eb517cc028993964fe08ad514b1e8892d"}, + {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f90e552ecbad426eab352e7b2933091f2be77115bb16f09f78404861c8322981"}, + {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:d83e2d94b69bf31ead2fa45f0acdef0757fa0458a129734f59f67f3d2eb7ef32"}, + {file = "lxml-5.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a02d3c48f9bb1e10c7788d92c0c7db6f2002d024ab6e74d6f45ae33e3d0288a3"}, + {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6d68ce8e7b2075390e8ac1e1d3a99e8b6372c694bbe612632606d1d546794207"}, + {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:453d037e09a5176d92ec0fd282e934ed26d806331a8b70ab431a81e2fbabf56d"}, + {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:3b019d4ee84b683342af793b56bb35034bd749e4cbdd3d33f7d1107790f8c472"}, + {file = "lxml-5.2.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb3942960f0beb9f46e2a71a3aca220d1ca32feb5a398656be934320804c0df9"}, + {file = "lxml-5.2.2-cp39-cp39-win32.whl", hash = "sha256:ac6540c9fff6e3813d29d0403ee7a81897f1d8ecc09a8ff84d2eea70ede1cdbf"}, + {file = "lxml-5.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:610b5c77428a50269f38a534057444c249976433f40f53e3b47e68349cca1425"}, + {file = 
"lxml-5.2.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b537bd04d7ccd7c6350cdaaaad911f6312cbd61e6e6045542f781c7f8b2e99d2"}, + {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4820c02195d6dfb7b8508ff276752f6b2ff8b64ae5d13ebe02e7667e035000b9"}, + {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a09f6184f17a80897172863a655467da2b11151ec98ba8d7af89f17bf63dae"}, + {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:76acba4c66c47d27c8365e7c10b3d8016a7da83d3191d053a58382311a8bf4e1"}, + {file = "lxml-5.2.2-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b128092c927eaf485928cec0c28f6b8bead277e28acf56800e972aa2c2abd7a2"}, + {file = "lxml-5.2.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ae791f6bd43305aade8c0e22f816b34f3b72b6c820477aab4d18473a37e8090b"}, + {file = "lxml-5.2.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a2f6a1bc2460e643785a2cde17293bd7a8f990884b822f7bca47bee0a82fc66b"}, + {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e8d351ff44c1638cb6e980623d517abd9f580d2e53bfcd18d8941c052a5a009"}, + {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec4bd9133420c5c52d562469c754f27c5c9e36ee06abc169612c959bd7dbb07"}, + {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:55ce6b6d803890bd3cc89975fca9de1dff39729b43b73cb15ddd933b8bc20484"}, + {file = "lxml-5.2.2-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ab6a358d1286498d80fe67bd3d69fcbc7d1359b45b41e74c4a26964ca99c3f8"}, + {file = "lxml-5.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:06668e39e1f3c065349c51ac27ae430719d7806c026fec462e5693b08b95696b"}, + {file = "lxml-5.2.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9cd5323344d8ebb9fb5e96da5de5ad4ebab993bbf51674259dbe9d7a18049525"}, + {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89feb82ca055af0fe797a2323ec9043b26bc371365847dbe83c7fd2e2f181c34"}, + {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e481bba1e11ba585fb06db666bfc23dbe181dbafc7b25776156120bf12e0d5a6"}, + {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d6c6ea6a11ca0ff9cd0390b885984ed31157c168565702959c25e2191674a14"}, + {file = "lxml-5.2.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3d98de734abee23e61f6b8c2e08a88453ada7d6486dc7cdc82922a03968928db"}, + {file = "lxml-5.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:69ab77a1373f1e7563e0fb5a29a8440367dec051da6c7405333699d07444f511"}, + {file = "lxml-5.2.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34e17913c431f5ae01d8658dbf792fdc457073dcdfbb31dc0cc6ab256e664a8d"}, + {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05f8757b03208c3f50097761be2dea0aba02e94f0dc7023ed73a7bb14ff11eb0"}, + {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a520b4f9974b0a0a6ed73c2154de57cdfd0c8800f4f15ab2b73238ffed0b36e"}, + {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5e097646944b66207023bc3c634827de858aebc226d5d4d6d16f0b77566ea182"}, + {file = "lxml-5.2.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:b5e4ef22ff25bfd4ede5f8fb30f7b24446345f3e79d9b7455aef2836437bc38a"}, + {file = "lxml-5.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ff69a9a0b4b17d78170c73abe2ab12084bdf1691550c5629ad1fe7849433f324"}, + {file = "lxml-5.2.2.tar.gz", hash = "sha256:bb2dc4898180bea79863d5487e5f9c7c34297414bad54bcd0f0852aee9cfdb87"}, +] + +[package.extras] +cssselect = ["cssselect (>=0.7)"] +html-clean = ["lxml-html-clean"] +html5 = ["html5lib"] +htmlsoup = ["BeautifulSoup4"] +source = ["Cython (>=3.0.10)"] + [[package]] name = "markdown" version = "3.6" @@ -1075,6 +1276,20 @@ files = [ griffe = ">=0.37" mkdocstrings = ">=0.20" +[[package]] +name = "mrcfile" +version = "1.5.1" +description = "MRC file I/O library" +optional = false +python-versions = "*" +files = [ + {file = "mrcfile-1.5.1-py2.py3-none-any.whl", hash = "sha256:06900f1245e66dd4617cbd4a7117a2d75d53fc4e5b74d811766f71a858b059a9"}, + {file = "mrcfile-1.5.1.tar.gz", hash = "sha256:403c4bb0ac842410ce5ea501f4fddc91ea37c12ef869d508d3ac571868d82ac2"}, +] + +[package.dependencies] +numpy = ">=1.16.0" + [[package]] name = "nest-asyncio" version = "1.6.0" @@ -1396,13 +1611,13 @@ tests = ["pytest"] [[package]] name = "pycodestyle" -version = "2.11.1" +version = "2.12.0" description = "Python style guide checker" optional = false python-versions = ">=3.8" files = [ - {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, - {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, + {file = "pycodestyle-2.12.0-py2.py3-none-any.whl", hash = "sha256:949a39f6b86c3e1515ba1787c2022131d165a8ad271b11370a8819aa070269e4"}, + {file = "pycodestyle-2.12.0.tar.gz", hash = "sha256:442f950141b4f43df752dd303511ffded3a04c2b6fb7f65980574f0c31e6e79c"}, ] [[package]] @@ -1448,6 +1663,26 @@ pyyaml = "*" [package.extras] extra = ["pygments (>=2.12)"] +[[package]] +name = "pyometiff" +version = "1.0.0" +description = "Read and Write OME-TIFFs in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyometiff-1.0.0-py3-none-any.whl", hash = "sha256:6c274786f32b9bd662105af37419c7f2f7ee6194bc1a7283b4ab2a00c53cf632"}, + {file = "pyometiff-1.0.0.tar.gz", hash = "sha256:596b7a7377a5f2e50292aa52dd22e6347a2d2f21577e93d1fcdcd63942e597f7"}, +] + +[package.dependencies] +imagecodecs = "*" +lxml = "*" +numpy = "*" +tifffile = ">2020.10.1" + +[package.extras] +dev = ["mock", "pytest (>3.7)"] + [[package]] name = "pyparsing" version = "3.1.2" @@ -2225,4 +2460,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = "~=3.10.0" -content-hash = "e9980867106af8e9e7524ff319d3c0785906b359022595732260fd4b3ca135ca" +content-hash = "5eb6cfca92f09b1f4bba15042adc44cafa174f28d7b4520bde92f0f7235fd233" diff --git a/pyproject.toml b/pyproject.toml index 3af895f..0e28923 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "bioxelnodes" -version = "0.2.7" +version = "0.2.8" description = "" authors = ["Ma Nan "] license = "MIT" @@ -11,6 +11,8 @@ python = "~=3.10.0" bpy = "~=4.0" simpleitk = "^2.3.1" scikit-image = "^0.24.0" +pyometiff = "^1.0.0" +mrcfile = "^1.5.1" [tool.poetry.group.dev.dependencies] ipykernel = "^6.25.2"