diff --git a/.github/workflows/run_unit_tests.yml b/.github/workflows/run_unit_tests.yml
index 29a380f2..8c533ea8 100644
--- a/.github/workflows/run_unit_tests.yml
+++ b/.github/workflows/run_unit_tests.yml
@@ -16,6 +16,7 @@ jobs:
strategy:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10"]
+ dependencies: [".", "'.[libjpeg]'"]
steps:
- uses: actions/checkout@v2
@@ -27,7 +28,7 @@ jobs:
run: |
python -m pip install --upgrade pip setuptools
pip install -r requirements_test.txt
- pip install .
+ pip install ${{ matrix.dependencies }}
- name: Lint with flake8
run: |
flake8 --exclude='bin,build,.eggs,src/highdicom/_*'
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 00000000..a5ed42d8
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,42 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite our paper."
+authors:
+- family-names: "Herrmann"
+ given-names: "Markus D."
+- family-names: "Bridge"
+ given-names: "Christopher P."
+- family-names: "Fedorov"
+ given-names: "Andriy Y."
+- family-names: "Pieper"
+ given-names: "Steven"
+- family-names: "Doyle"
+ given-names: "Sean W."
+- family-names: "Gorman"
+ given-names: "Chris"
+preferred-citation:
+ type: article
+ authors:
+ - family-names: "Bridge"
+ given-names: "Christopher P."
+ orcid: "https://orcid.org/0000-0002-2242-351X"
+ - family-names: "Gorman"
+ given-names: "Chris"
+ - family-names: "Pieper"
+ given-names: "Steven"
+ - family-names: "Doyle"
+ given-names: "Sean W."
+ - family-names: "Lennerz"
+ given-names: "Jochen K."
+ - family-names: "Kalpathy-Cramer"
+    given-names: "Jayashree"
+ - family-names: "Clunie"
+ given-names: "David A."
+ - family-names: "Fedorov"
+ given-names: "Andriy Y."
+ - family-names: "Herrmann"
+ given-names: "Markus D."
+ orcid: "https://orcid.org/0000-0002-7257-9205"
+ title: "Highdicom: a Python Library for Standardized Encoding of Image Annotations and Machine Learning Model Outputs in Pathology and Radiology"
+ journal: "J Digit Imaging"
+ year: 2022
+ doi: 10.1007/s10278-022-00683-y
diff --git a/data/test_files/dx_image.dcm b/data/test_files/dx_image.dcm
new file mode 100644
index 00000000..a9b52654
Binary files /dev/null and b/data/test_files/dx_image.dcm differ
diff --git a/docs/installation.rst b/docs/installation.rst
index decbd4d3..5fa1e1a6 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -8,7 +8,7 @@ Installation guide
Requirements
------------
-* `Python `_ (version 3.5 or higher)
+* `Python `_ (version 3.6 or higher)
* Python package manager `pip `_
.. _installation:
@@ -22,7 +22,22 @@ Pre-build package available at PyPi:
pip install highdicom
-Source code available at Github:
+The library relies on the underlying ``pydicom`` package for decoding of pixel
+data, which internally delegates the task to either the ``pillow`` or the
+``pylibjpeg`` packages. Since ``pillow`` is a dependency of *highdicom* and
+will automatically be installed, some transfer syntaxes can be readily
+decoded and encoded (baseline JPEG, JPEG-2000, JPEG-LS). Support for additional
+transfer syntaxes (e.g., lossless JPEG) requires installation of the
+``pylibjpeg`` package as well as the ``pylibjpeg-libjpeg`` and
+``pylibjpeg-openjpeg`` packages. Since ``pylibjpeg-libjpeg`` is licensed under
+a copyleft GPL v3 license, it is not installed by default when you install
+*highdicom*. To install the ``pylibjpeg`` packages along with *highdicom*, use
+
+.. code-block:: none
+
+ pip install highdicom[libjpeg]
+
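+To check which pixel data handlers ``pydicom`` can use in the current
+environment (a quick diagnostic, not part of the *highdicom* API; assumes
+``pydicom`` 2.x), run the following:
+
+.. code-block:: python
+
+    from pydicom import config
+
+    # Each handler module reports whether its optional dependencies are present
+    for handler in config.pixel_data_handlers:
+        print(handler.__name__, handler.is_available())
+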
+Install directly from source code (available on Github):
.. code-block:: none
diff --git a/docs/release_notes.rst b/docs/release_notes.rst
index 44f82aeb..cde95a98 100644
--- a/docs/release_notes.rst
+++ b/docs/release_notes.rst
@@ -185,3 +185,19 @@ error.
Similarly, as of highdicom 0.18.0, it is no longer possible to pass datasets
with a Big Endian transfer syntax to the `from_dataset` methods of any of the
:class:`highdicom.SOPClass` subclasses.
+
+.. _update-image-library:
+
+Change in MeasurementReport constructor for TID 1601 enhancement
+----------------------------------------------------------------
+
+A breaking change was made after highdicom 0.18.4 in the creation of Image
+Library (TID 1601) content.
+Previously, the Image Library was constructed by explicitly passing a
+sequence of `ImageLibraryEntryDescriptors` objects to the
+:class:`highdicom.sr.MeasurementReport` constructor via the
+`image_library_groups` argument.
+Now, a sequence of `pydicom.dataset.Dataset` objects representing the
+referenced images is passed via the `referenced_images` argument and the
+Image Library components are created internally by highdicom.
+This standardizes the content of the Image Library subcomponents.
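+
+For illustration, a minimal sketch of the new pattern (``observation_context``,
+``imaging_measurements``, ``procedure_code``, and ``image_datasets`` are
+placeholders for objects constructed elsewhere):
+
+.. code-block:: python
+
+    import highdicom as hd
+
+    report = hd.sr.MeasurementReport(
+        observation_context=observation_context,
+        procedure_reported=procedure_code,
+        imaging_measurements=imaging_measurements,
+        # previously: image_library_groups=[ImageLibraryEntryDescriptors(...)]
+        referenced_images=image_datasets,
+    )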
diff --git a/docs/usage.rst b/docs/usage.rst
index 8bd86bb5..73fe02d3 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -121,7 +121,7 @@ Derive a Segmentation image from a multi-frame Slide Microscopy (SM) image:
)
# Create the Segmentation instance
- seg_dataset = Segmentation(
+ seg_dataset = hd.seg.Segmentation(
source_images=[image_dataset],
pixel_array=mask,
segmentation_type=hd.seg.SegmentationTypeValues.BINARY,
diff --git a/setup.py b/setup.py
index 24323590..2a5c6107 100644
--- a/setup.py
+++ b/setup.py
@@ -55,8 +55,12 @@ def get_version():
'numpy>=1.19',
'pillow>=8.3',
'pillow-jpls>=1.0',
- 'pylibjpeg>=1.4',
- 'pylibjpeg-libjpeg>=1.3',
- 'pylibjpeg-openjpeg>=1.2',
],
+    extras_require={
+ 'libjpeg': [
+ 'pylibjpeg>=1.4',
+ 'pylibjpeg-libjpeg>=1.3',
+ 'pylibjpeg-openjpeg>=1.2'
+ ],
+ },
)
diff --git a/src/highdicom/_module_utils.py b/src/highdicom/_module_utils.py
index 2a33dd14..8a91a9bf 100644
--- a/src/highdicom/_module_utils.py
+++ b/src/highdicom/_module_utils.py
@@ -3,13 +3,6 @@
from pydicom import Dataset
-from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP
-from highdicom._modules import MODULE_ATTRIBUTE_MAP
-from highdicom._iods import (
- IOD_MODULE_MAP,
- SOP_CLASS_UID_IOD_KEY_MAP
-)
-
# Allowed values for the type of an attribute
class AttributeTypeValues(Enum):
@@ -168,6 +161,7 @@ def construct_module_tree(module: str) -> Dict[str, Any]:
dictionary that forms an item in the next level of the tree structure.
"""
+ from highdicom._modules import MODULE_ATTRIBUTE_MAP
if module not in MODULE_ATTRIBUTE_MAP:
raise AttributeError(f"No such module found: '{module}'.")
tree: Dict[str, Any] = {'attributes': {}}
@@ -205,6 +199,10 @@ def get_module_usage(
"""
+ from highdicom._iods import (
+ IOD_MODULE_MAP,
+ SOP_CLASS_UID_IOD_KEY_MAP
+ )
try:
iod_name = SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid]
except KeyError as e:
@@ -235,6 +233,11 @@ def is_attribute_in_iod(attribute: str, sop_class_uid: str) -> bool:
specified by the sop_class_uid. False otherwise.
"""
+ from highdicom._iods import (
+ IOD_MODULE_MAP,
+ SOP_CLASS_UID_IOD_KEY_MAP
+ )
+ from highdicom._modules import MODULE_ATTRIBUTE_MAP
try:
iod_name = SOP_CLASS_UID_IOD_KEY_MAP[sop_class_uid]
except KeyError as e:
diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py
index c7234ca4..d793eb6a 100644
--- a/src/highdicom/ann/content.py
+++ b/src/highdicom/ann/content.py
@@ -11,7 +11,10 @@
AnnotationGroupGenerationTypeValues,
GraphicTypeValues,
)
-from highdicom.content import AlgorithmIdentificationSequence
+from highdicom.content import (
+ AlgorithmIdentificationSequence,
+ ReferencedImageSequence,
+)
from highdicom.sr.coding import CodedConcept
from highdicom.uid import UID
from highdicom._module_utils import check_required_attributes
@@ -25,7 +28,8 @@ def __init__(
self,
name: Union[Code, CodedConcept],
values: np.ndarray,
- unit: Union[Code, CodedConcept]
+ unit: Union[Code, CodedConcept],
+ referenced_images: Optional[ReferencedImageSequence] = None
) -> None:
"""
Parameters
@@ -40,6 +44,9 @@ def __init__(
unit: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code], optional
Coded units of measurement (see :dcm:`CID 7181 `
"Abstract Multi-dimensional Image Model Component Units")
+ referenced_images: Union[highdicom.ReferencedImageSequence, None], optional
+ Referenced image to which the measurement applies. Should only be
+ provided for intensity measurements.
""" # noqa: E501
super().__init__()
@@ -61,6 +68,22 @@ def __init__(
item.AnnotationIndexList = stored_indices.tobytes()
self.MeasurementValuesSequence = [item]
+ if referenced_images is not None:
+ if len(referenced_images) == 0:
+ raise ValueError(
+ 'Argument "referenced_images" must contain one item.'
+ )
+ elif len(referenced_images) > 1:
+ raise ValueError(
+ 'Argument "referenced_images" must contain only one item.'
+ )
+ if not isinstance(referenced_images, ReferencedImageSequence):
+ raise TypeError(
+ 'Argument "referenced_images" must have type '
+ 'ReferencedImageSequence.'
+ )
+ self.ReferencedImageSequence = referenced_images
+
@property
def name(self) -> CodedConcept:
"""highdicom.sr.CodedConcept: coded name"""
@@ -71,6 +94,14 @@ def unit(self) -> CodedConcept:
"""highdicom.sr.CodedConcept: coded unit"""
return self.MeasurementUnitsCodeSequence[0]
+ @property
+ def referenced_images(self) -> Union[ReferencedImageSequence, None]:
+ """Union[highdicom.ReferencedImageSequence, None]: referenced images"""
+        if hasattr(self, 'ReferencedImageSequence'):
+            return ReferencedImageSequence.from_sequence(
+                self.ReferencedImageSequence
+            )
+        else:
+            return None
+
def get_values(self, number_of_annotations: int) -> np.ndarray:
"""Get measured values for annotations.
@@ -151,6 +182,11 @@ def from_dataset(cls, dataset: Dataset) -> 'Measurements':
measurements.MeasurementUnitsCodeSequence[0]
)
]
+ if hasattr(measurements, 'ReferencedImageSequence'):
+ measurements.ReferencedImageSequence = \
+ ReferencedImageSequence.from_sequence(
+ measurements.ReferencedImageSequence
+ )
return cast(Measurements, measurements)
@@ -520,6 +556,12 @@ def get_graphic_data(
)
else:
if coordinate_type == AnnotationCoordinateTypeValues.SCOORD:
+ if hasattr(self, 'CommonZCoordinateValue'):
+ raise ValueError(
+ 'The annotation group contains the '
+ '"Common Z Coordinate Value" element and therefore '
+ 'cannot have Annotation Coordinate Type "2D".'
+ )
coordinate_dimensionality = 2
else:
coordinate_dimensionality = 3
@@ -633,7 +675,10 @@ def get_measurements(
self,
name: Optional[Union[Code, CodedConcept]] = None
) -> Tuple[
- List[CodedConcept], np.ndarray, List[CodedConcept]
+ List[CodedConcept],
+ np.ndarray,
+ List[CodedConcept],
+ List[Union[ReferencedImageSequence, None]]
]:
"""Get measurements.
@@ -654,6 +699,8 @@ def get_measurements(
given annotation.
units: List[highdicom.sr.CodedConcept]
Units of measurements
+        referenced_images: List[Union[highdicom.ReferencedImageSequence, None]]
+ Referenced images
""" # noqa: E501
number_of_annotations = self.number_of_annotations
@@ -675,11 +722,16 @@ def get_measurements(
item.unit for item in self.MeasurementsSequence
if name is None or item.name == name
]
+ referenced_images = [
+ item.referenced_images for item in self.MeasurementsSequence
+ if name is None or item.name == name
+ ]
else:
value_array = np.empty((number_of_annotations, 0), np.float32)
names = []
units = []
- return (names, value_array, units)
+ referenced_images = []
+ return (names, value_array, units, referenced_images)
def _get_coordinate_index(
self,
diff --git a/src/highdicom/base.py b/src/highdicom/base.py
index ca6e056b..f2aa7cad 100644
--- a/src/highdicom/base.py
+++ b/src/highdicom/base.py
@@ -16,8 +16,6 @@
)
from highdicom.valuerep import check_person_name
from highdicom.version import __version__
-from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP
-from highdicom._modules import MODULE_ATTRIBUTE_MAP
from highdicom._module_utils import is_attribute_in_iod
@@ -289,6 +287,8 @@ def _copy_root_attributes_of_module(
DICOM Module (e.g., ``"General Series"`` or ``"Specimen"``)
"""
+ from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP
+ from highdicom._modules import MODULE_ATTRIBUTE_MAP
logger.info(
'copy {}-related attributes from dataset "{}"'.format(
ie, dataset.SOPInstanceUID
diff --git a/src/highdicom/content.py b/src/highdicom/content.py
index d94a0e54..344e26e2 100644
--- a/src/highdicom/content.py
+++ b/src/highdicom/content.py
@@ -10,7 +10,10 @@
from pydicom.sr.coding import Code
from pydicom.sr.codedict import codes
from pydicom.valuerep import DS, format_number_as_ds
-from pydicom._storage_sopclass_uids import SegmentationStorage
+from pydicom.uid import (
+ SegmentationStorage,
+ VLWholeSlideMicroscopyImageStorage,
+)
from highdicom.enum import (
CoordinateSystemNames,
@@ -105,7 +108,7 @@ def from_sequence(
Returns
-------
- highdicom.seg.content.AlgorithmIdentificationSequence
+ highdicom.AlgorithmIdentificationSequence
Algorithm Identification Sequence
"""
@@ -1406,21 +1409,20 @@ def __init__(
referenced_images: Optional[Sequence[Dataset]] = None,
referenced_frame_number: Union[int, Sequence[int], None] = None,
referenced_segment_number: Union[int, Sequence[int], None] = None,
+        referenced_optical_path_identifier: Union[str, None] = None,
):
"""
Parameters
----------
referenced_images: Union[Sequence[pydicom.Dataset], None], optional
- Images to which the VOI LUT described in this dataset applies. Note
- that if unspecified, the VOI LUT applies to every image referenced
- in the presentation state object that this dataset is included in.
+ Images that should be referenced
referenced_frame_number: Union[int, Sequence[int], None], optional
- Frame number(s) within a referenced multiframe image to which this
- VOI LUT applies.
+ Frame number(s) within a referenced multiframe image
referenced_segment_number: Union[int, Sequence[int], None], optional
- Segment number(s) within a referenced segmentation image to which
- this VOI LUT applies.
+ Segment number(s) within a referenced segmentation image
+        referenced_optical_path_identifier: Union[str, None], optional
+            Identifier of the optical path within a referenced microscopy image
"""
super().__init__()
@@ -1445,6 +1447,7 @@ def __init__(
raise ValueError("Found duplicate instances in referenced images.")
multiple_images = len(referenced_images) > 1
+ sop_class_uid = referenced_images[0].SOPClassUID
if referenced_frame_number is not None:
if multiple_images:
raise ValueError(
@@ -1466,16 +1469,17 @@ def __init__(
f'Frame number {f} is invalid for referenced '
'image.'
)
+
if referenced_segment_number is not None:
if multiple_images:
raise ValueError(
'Specifying "referenced_segment_number" is not '
'supported with multiple referenced images.'
)
- if referenced_images[0].SOPClassUID != SegmentationStorage:
+ if sop_class_uid != SegmentationStorage:
raise TypeError(
'"referenced_segment_number" is only valid when the '
- 'referenced image is a segmentation image.'
+ 'referenced image is a Segmentation image.'
)
number_of_segments = len(referenced_images[0].SegmentSequence)
if isinstance(referenced_segment_number, Sequence):
@@ -1485,8 +1489,7 @@ def __init__(
for s in _referenced_segment_numbers:
if s < 1 or s > number_of_segments:
raise ValueError(
- f'Segment number {s} is invalid for referenced '
- 'image.'
+ f'Segment number {s} is invalid for referenced image.'
)
if referenced_frame_number is not None:
# Check that the one of the specified segments exists
@@ -1504,6 +1507,31 @@ def __init__(
f'Referenced frame {f} does not contain any of '
'the referenced segments.'
)
+
+ if referenced_optical_path_identifier is not None:
+ if multiple_images:
+ raise ValueError(
+ 'Specifying "referenced_optical_path_identifier" is not '
+ 'supported with multiple referenced images.'
+ )
+ if sop_class_uid != VLWholeSlideMicroscopyImageStorage:
+ raise TypeError(
+                    '"referenced_optical_path_identifier" is only valid when '
+                    'the referenced image is a VL Whole Slide Microscopy '
+                    'image.'
+ )
+ has_optical_path = False
+ for ref_img in referenced_images:
+ for optical_path_item in ref_img.OpticalPathSequence:
+ has_optical_path |= (
+ optical_path_item.OpticalPathIdentifier ==
+ referenced_optical_path_identifier
+ )
+ if not has_optical_path:
+ raise ValueError(
+                    'None of the referenced images contains the specified '
+ '"referenced_optical_path_identifier".'
+ )
+
for im in referenced_images:
if not does_iod_have_pixel_data(im.SOPClassUID):
raise ValueError(
@@ -1515,10 +1543,50 @@ def __init__(
ref_im.ReferencedSOPClassUID = im.SOPClassUID
if referenced_segment_number is not None:
ref_im.ReferencedSegmentNumber = referenced_segment_number
+ elif referenced_optical_path_identifier is not None:
+ ref_im.ReferencedOpticalPathIdentifier = \
+ str(referenced_optical_path_identifier)
if referenced_frame_number is not None:
ref_im.ReferencedFrameNumber = referenced_frame_number
self.append(ref_im)
+ @classmethod
+ def from_sequence(
+ cls,
+ sequence: DataElementSequence
+ ) -> 'ReferencedImageSequence':
+ """Construct instance from an existing data element sequence.
+
+ Parameters
+ ----------
+ sequence: pydicom.sequence.Sequence
+ Data element sequence representing the
+            Referenced Image Sequence
+
+ Returns
+ -------
+ highdicom.ReferencedImageSequence
+ Referenced Image Sequence
+
+ """
+ if not isinstance(sequence, DataElementSequence):
+ raise TypeError(
+ 'Sequence should be of type pydicom.sequence.Sequence.'
+ )
+ if len(sequence) != 1:
+ raise ValueError('Sequence should contain a single item.')
+ check_required_attributes(
+ sequence[0],
+ module='advanced-blending-presentation-state',
+ base_path=[
+ 'AdvancedBlendingSequence',
+ 'ReferencedImageSequence',
+ ]
+ )
+ ref_img_sequence = deepcopy(sequence)
+ ref_img_sequence.__class__ = ReferencedImageSequence
+ return cast(ReferencedImageSequence, ref_img_sequence)
+
class LUT(Dataset):
diff --git a/src/highdicom/legacy/sop.py b/src/highdicom/legacy/sop.py
index 3ae20849..d7c3df4f 100644
--- a/src/highdicom/legacy/sop.py
+++ b/src/highdicom/legacy/sop.py
@@ -17,8 +17,6 @@
from highdicom.base import SOPClass
from highdicom.frame import encode_frame
-from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP
-from highdicom._modules import MODULE_ATTRIBUTE_MAP
logger = logging.getLogger(__name__)
@@ -60,6 +58,8 @@ def _convert_legacy_to_enhanced(
which instances are provided via `sf_datasets`.
"""
+ from highdicom._iods import IOD_MODULE_MAP, SOP_CLASS_UID_IOD_KEY_MAP
+ from highdicom._modules import MODULE_ATTRIBUTE_MAP
try:
ref_ds = sf_datasets[0]
except IndexError:
diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py
index ff4b7fe6..9617f151 100644
--- a/src/highdicom/seg/content.py
+++ b/src/highdicom/seg/content.py
@@ -77,7 +77,7 @@ def __init__(
"Anatomic Region", :dcm:`CID 4031 `
"Common Anatomic Regions", as as well as other CIDs for
domain-specific anatomic regions)
- primary_anatomic_structures: Union[Sequence[Union[highdicom.sr.Code, highdicom.sr.CodedConcept]], None], optional
+ primary_anatomic_structures: Union[Sequence[Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept]], None], optional
Anatomic structure(s) the segment represents
(see CIDs for domain-specific primary anatomic structures)
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index 581ac851..0b597741 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -312,6 +312,8 @@ def __init__(
if pixel_array.ndim == 2:
pixel_array = pixel_array[np.newaxis, ...]
+ if pixel_array.ndim not in [3, 4]:
+ raise ValueError('Pixel array must be a 2D, 3D, or 4D array.')
super().__init__(
study_instance_uid=src_img.StudyInstanceUID,
@@ -568,17 +570,6 @@ def __init__(
self.NumberOfFrames = 0
self.PerFrameFunctionalGroupsSequence: List[Dataset] = []
- if pixel_array.ndim == 2:
- pixel_array = pixel_array[np.newaxis, ...]
- if pixel_array.ndim not in [3, 4]:
- raise ValueError('Pixel array must be a 2D, 3D, or 4D array.')
-
- if pixel_array.shape[1:3] != (self.Rows, self.Columns):
- raise ValueError(
- 'Pixel array representing segments has the wrong number of '
- 'rows and columns.'
- )
-
# Check segment numbers
described_segment_numbers = np.array([
int(item.SegmentNumber)
@@ -593,6 +584,13 @@ def __init__(
segmentation_type
)
self.SegmentsOverlap = segments_overlap.value
+ if omit_empty_frames and pixel_array.sum() == 0:
+ omit_empty_frames = False
+ logger.warning(
+ 'Encoding an empty segmentation with "omit_empty_frames" '
+ 'set to True. Reverting to encoding all frames since omitting '
+ 'all frames is not possible.'
+ )
if has_ref_frame_uid:
if plane_positions is None:
diff --git a/src/highdicom/sr/coding.py b/src/highdicom/sr/coding.py
index d6ee9b50..6e033a46 100644
--- a/src/highdicom/sr/coding.py
+++ b/src/highdicom/sr/coding.py
@@ -1,3 +1,4 @@
+from copy import deepcopy
import logging
from typing import Any, Optional, Union
@@ -64,13 +65,15 @@ def __eq__(self, other: Any) -> bool:
whether `self` and `other` are considered equal
"""
- this = Code(
- self.value,
- self.scheme_designator,
- self.meaning,
- self.scheme_version
- )
- return Code.__eq__(this, other)
+ if isinstance(other, (Code, CodedConcept)):
+ this = Code(
+ self.value,
+ self.scheme_designator,
+ self.meaning,
+ self.scheme_version
+ )
+ return Code.__eq__(this, other)
+ return super().__eq__(other)
def __ne__(self, other: Any) -> bool:
"""Compares `self` and `other` for inequality.
@@ -121,12 +124,9 @@ def from_dataset(cls, dataset: Dataset) -> 'CodedConcept':
'Dataset does not contain the following attribute '
f'required for coded concepts: {kw}.'
)
- return cls(
- value=dataset.CodeValue,
- scheme_designator=dataset.CodingSchemeDesignator,
- meaning=dataset.CodeMeaning,
- scheme_version=getattr(dataset, 'CodingSchemeVersion', None)
- )
+ concept = deepcopy(dataset)
+ concept.__class__ = cls
+ return concept
@classmethod
def from_code(cls, code: Union[Code, 'CodedConcept']) -> 'CodedConcept':
diff --git a/src/highdicom/sr/content.py b/src/highdicom/sr/content.py
index 3327b2df..6fe47f8b 100644
--- a/src/highdicom/sr/content.py
+++ b/src/highdicom/sr/content.py
@@ -1268,7 +1268,7 @@ def __init__(
modifier_item = CodeContentItem(
name=CodedConcept(
value='106233006',
- meaning='Topographical Modifier',
+ meaning='Topographical modifier',
scheme_designator='SCT'
),
value=topographical_modifier,
diff --git a/src/highdicom/sr/templates.py b/src/highdicom/sr/templates.py
index fc4ee697..8583b6e7 100644
--- a/src/highdicom/sr/templates.py
+++ b/src/highdicom/sr/templates.py
@@ -1,7 +1,8 @@
"""DICOM structured reporting templates."""
+import collections
import logging
from copy import deepcopy
-from typing import cast, Iterable, List, Optional, Sequence, Tuple, Union
+from typing import cast, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from pydicom.dataset import Dataset
from pydicom.sr.coding import Code
@@ -22,6 +23,7 @@
SourceImageForSegmentation,
SourceSeriesForSegmentation
)
+
from highdicom.sr.enum import (
GraphicTypeValues,
GraphicTypeValues3D,
@@ -29,7 +31,10 @@
ValueTypeValues,
)
from highdicom.uid import UID
-from highdicom.sr.utils import find_content_items, get_coded_name
+from highdicom.sr.utils import (
+ find_content_items,
+ get_coded_name
+)
from highdicom.sr.value_types import (
CodeContentItem,
ContainerContentItem,
@@ -42,7 +47,7 @@
UIDRefContentItem,
)
-
+from highdicom._module_utils import does_iod_have_pixel_data
# Codes missing from pydicom
DEFAULT_LANGUAGE = CodedConcept(
value='en-US',
@@ -489,6 +494,143 @@ def _contains_image_items(
return False
+def _get_coded_modality(sop_class_uid: str) -> Code:
+ """Get the coded modality for a SOP Class UID of an Image.
+
+ Parameters
+ ----------
+ sop_class_uid: str
+ SOP Class UID
+
+ Returns
+ -------
+ pydicom.sr.coding.Code
+ Coded Acquisition Modality
+ (see :dcm:`CID 29 Acquisition Modality `)
+
+ Raises
+ ------
+ ValueError
+ if the SOP Class UID does not identify a SOP Class
+ for storage of an Image information entity
+
+ """ # noqa: E501
+ sopclass_to_modality_map: Dict[str, Code] = {
+ '1.2.840.10008.5.1.4.1.1.1': codes.cid29.ComputedRadiography,
+ '1.2.840.10008.5.1.4.1.1.1.1': codes.cid29.DigitalRadiography,
+ '1.2.840.10008.5.1.4.1.1.1.1.1': codes.cid29.DigitalRadiography,
+ '1.2.840.10008.5.1.4.1.1.1.2': codes.cid29.Mammography,
+ '1.2.840.10008.5.1.4.1.1.1.2.1': codes.cid29.Mammography,
+ '1.2.840.10008.5.1.4.1.1.1.3': codes.cid29.IntraOralRadiography,
+ '1.2.840.10008.5.1.4.1.1.1.3.1': codes.cid29.IntraOralRadiography,
+ '1.2.840.10008.5.1.4.1.1.2': codes.cid29.ComputedTomography,
+ '1.2.840.10008.5.1.4.1.1.2.1': codes.cid29.ComputedTomography,
+ '1.2.840.10008.5.1.4.1.1.2.2': codes.cid29.ComputedTomography,
+ '1.2.840.10008.5.1.4.1.1.3.1': codes.cid29.Ultrasound,
+ '1.2.840.10008.5.1.4.1.1.4': codes.cid29.MagneticResonance,
+ '1.2.840.10008.5.1.4.1.1.4.1': codes.cid29.MagneticResonance,
+ '1.2.840.10008.5.1.4.1.1.4.2': codes.cid29.MagneticResonance,
+ '1.2.840.10008.5.1.4.1.1.4.3': codes.cid29.MagneticResonance,
+ '1.2.840.10008.5.1.4.1.1.4.4': codes.cid29.MagneticResonance,
+ '1.2.840.10008.5.1.4.1.1.6.1': codes.cid29.Ultrasound,
+ '1.2.840.10008.5.1.4.1.1.6.2': codes.cid29.Ultrasound,
+ '1.2.840.10008.5.1.4.1.1.7': codes.cid32.Other,
+ '1.2.840.10008.5.1.4.1.1.7.1': codes.cid32.Other,
+ '1.2.840.10008.5.1.4.1.1.7.2': codes.cid32.Other,
+ '1.2.840.10008.5.1.4.1.1.7.3': codes.cid32.Other,
+ '1.2.840.10008.5.1.4.1.1.7.4': codes.cid32.Other,
+ '1.2.840.10008.5.1.4.1.1.9.1.1': codes.cid29.Electrocardiography,
+ '1.2.840.10008.5.1.4.1.1.9.1.2': codes.cid29.Electrocardiography,
+ '1.2.840.10008.5.1.4.1.1.9.1.3': codes.cid29.Electrocardiography,
+ '1.2.840.10008.5.1.4.1.1.9.2.1': codes.cid29.HemodynamicWaveform,
+ '1.2.840.10008.5.1.4.1.1.9.3.1': codes.cid29.Electrocardiography,
+ '1.2.840.10008.5.1.4.1.1.9.4.1': codes.cid32.BasicVoiceAudio,
+ '1.2.840.10008.5.1.4.1.1.9.5.1': codes.cid29.HemodynamicWaveform,
+ '1.2.840.10008.5.1.4.1.1.9.6.1': codes.cid29.RespiratoryWaveform,
+ '1.2.840.10008.5.1.4.1.1.11.1': codes.cid32.PresentationState,
+ '1.2.840.10008.5.1.4.1.1.11.2': codes.cid32.PresentationState,
+ '1.2.840.10008.5.1.4.1.1.11.3': codes.cid32.PresentationState,
+ '1.2.840.10008.5.1.4.1.1.11.4': codes.cid32.PresentationState,
+ '1.2.840.10008.5.1.4.1.1.12.1': codes.cid29.XRayAngiography,
+ '1.2.840.10008.5.1.4.1.1.12.1.1': codes.cid29.XRayAngiography,
+ '1.2.840.10008.5.1.4.1.1.12.2': codes.cid29.Radiofluoroscopy,
+ '1.2.840.10008.5.1.4.1.1.12.2.1': codes.cid29.Radiofluoroscopy,
+ '1.2.840.10008.5.1.4.1.1.13.1.1': codes.cid29.XRayAngiography,
+ '1.2.840.10008.5.1.4.1.1.13.1.2': codes.cid29.DigitalRadiography,
+ '1.2.840.10008.5.1.4.1.1.13.1.3': codes.cid29.Mammography,
+ '1.2.840.10008.5.1.4.1.1.14.1': codes.cid29.IntravascularOpticalCoherenceTomography, # noqa E501
+ '1.2.840.10008.5.1.4.1.1.14.2': codes.cid29.IntravascularOpticalCoherenceTomography, # noqa E501
+ '1.2.840.10008.5.1.4.1.1.20': codes.cid29.NuclearMedicine,
+ '1.2.840.10008.5.1.4.1.1.66.1': codes.cid32.Registration,
+ '1.2.840.10008.5.1.4.1.1.66.2': codes.cid32.SpatialFiducials,
+ '1.2.840.10008.5.1.4.1.1.66.3': codes.cid32.Registration,
+ '1.2.840.10008.5.1.4.1.1.66.4': codes.cid32.Segmentation,
+ '1.2.840.10008.5.1.4.1.1.67': codes.cid32.RealWorldValueMap,
+ '1.2.840.10008.5.1.4.1.1.68.1': codes.cid29.OpticalSurfaceScanner,
+ '1.2.840.10008.5.1.4.1.1.68.2': codes.cid29.OpticalSurfaceScanner,
+ '1.2.840.10008.5.1.4.1.1.77.1.1': codes.cid29.Endoscopy,
+ '1.2.840.10008.5.1.4.1.1.77.1.1.1': codes.cid29.Endoscopy,
+ '1.2.840.10008.5.1.4.1.1.77.1.2': codes.cid29.GeneralMicroscopy,
+ '1.2.840.10008.5.1.4.1.1.77.1.2.1': codes.cid29.GeneralMicroscopy,
+ '1.2.840.10008.5.1.4.1.1.77.1.3': codes.cid29.SlideMicroscopy,
+ '1.2.840.10008.5.1.4.1.1.77.1.4': codes.cid29.ExternalCameraPhotography,
+ '1.2.840.10008.5.1.4.1.1.77.1.4.1': codes.cid29.ExternalCameraPhotography, # noqa E501
+ '1.2.840.10008.5.1.4.1.1.77.1.5.1': codes.cid29.OphthalmicPhotography,
+ '1.2.840.10008.5.1.4.1.1.77.1.5.2': codes.cid29.OphthalmicPhotography,
+ '1.2.840.10008.5.1.4.1.1.77.1.5.3': codes.cid32.StereometricRelationship, # noqa E501
+ '1.2.840.10008.5.1.4.1.1.77.1.5.4': codes.cid29.OphthalmicTomography,
+ '1.2.840.10008.5.1.4.1.1.77.1.6': codes.cid29.SlideMicroscopy,
+ '1.2.840.10008.5.1.4.1.1.78.1': codes.cid29.Lensometry,
+ '1.2.840.10008.5.1.4.1.1.78.2': codes.cid29.Autorefraction,
+ '1.2.840.10008.5.1.4.1.1.78.3': codes.cid29.Keratometry,
+ '1.2.840.10008.5.1.4.1.1.78.4': codes.cid29.SubjectiveRefraction,
+ '1.2.840.10008.5.1.4.1.1.78.5': codes.cid29.VisualAcuity,
+ '1.2.840.10008.5.1.4.1.1.78.7': codes.cid29.OphthalmicAxialMeasurements,
+ '1.2.840.10008.5.1.4.1.1.78.8': codes.cid32.IntraocularLensCalculation,
+ '1.2.840.10008.5.1.4.1.1.80.1': codes.cid29.OphthalmicVisualField,
+ '1.2.840.10008.5.1.4.1.1.81.1': codes.cid29.OphthalmicMapping,
+ '1.2.840.10008.5.1.4.1.1.82.1': codes.cid29.OphthalmicMapping,
+ '1.2.840.10008.5.1.4.1.1.88.11': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.22': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.33': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.34': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.35': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.50': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.59': codes.cid32.KeyObjectSelection,
+ '1.2.840.10008.5.1.4.1.1.88.65': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.67': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.68': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.70': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.71': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.72': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.73': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.74': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.75': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.88.76': codes.cid32.StructuredReportDocument,
+ '1.2.840.10008.5.1.4.1.1.90.1': codes.cid32.ContentAssessmentResult,
+ '1.2.840.10008.5.1.4.1.1.128': codes.cid29.PositronEmissionTomography,
+ '1.2.840.10008.5.1.4.1.1.130': codes.cid29.PositronEmissionTomography,
+ '1.2.840.10008.5.1.4.1.1.128.1': codes.cid29.PositronEmissionTomography,
+ '1.2.840.10008.5.1.4.1.1.200.2': codes.cid32.CTProtocol,
+ '1.2.840.10008.5.1.4.1.1.481.1': codes.cid29.RTImage,
+ '1.2.840.10008.5.1.4.1.1.481.2': codes.cid32.RTDose,
+ '1.2.840.10008.5.1.4.1.1.481.3': codes.cid32.RTStructureSet,
+ '1.2.840.10008.5.1.4.1.1.481.4': codes.cid32.RTTreatmentRecord,
+ '1.2.840.10008.5.1.4.1.1.481.5': codes.cid32.RTPlan,
+ '1.2.840.10008.5.1.4.1.1.481.6': codes.cid32.RTTreatmentRecord,
+ '1.2.840.10008.5.1.4.1.1.481.7': codes.cid32.RTTreatmentRecord,
+ '1.2.840.10008.5.1.4.1.1.481.8': codes.cid32.RTPlan,
+ '1.2.840.10008.5.1.4.1.1.481.9': codes.cid32.RTTreatmentRecord,
+ }
+ try:
+ return sopclass_to_modality_map[sop_class_uid]
+ except KeyError:
+ raise ValueError(
+ 'SOP Class UID does not identify a SOP Class '
+ 'for storage of an image information entity.'
+ )
+
+
class Template(ContentSequence):
"""Abstract base class for a DICOM SR template."""
@@ -3644,60 +3786,49 @@ def from_sequence(
class ImageLibraryEntryDescriptors(Template):
- """`TID 1602 `_
- Image Library Entry Descriptors""" # noqa: E501
+ """:dcm:`TID 1602 Image Library Entry Descriptors
+ `
+ """ # noqa: E501
def __init__(
self,
- modality: Union[Code, CodedConcept],
- frame_of_reference_uid: str,
- pixel_data_rows: int,
- pixel_data_columns: int,
+ image: Dataset,
additional_descriptors: Optional[Sequence[ContentItem]] = None
) -> None:
"""
+
Parameters
----------
- modality: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code]
- Modality
- frame_of_reference_uid: str
- Frame of Reference UID
- pixel_data_rows: int
- Number of rows in pixel data frames
- pixel_data_columns: int
- Number of rows in pixel data frames
+ image: pydicom.dataset.Dataset
+ Metadata of a referenced image instance
additional_descriptors: Union[Sequence[highdicom.sr.ContentItem], None], optional
- Additional SR Content Items that should be included
+            Additional SR Content Items that should be included
+            to describe the referenced image
""" # noqa: E501
super().__init__()
+        if not does_iod_have_pixel_data(image.SOPClassUID):
+            raise ValueError(
+                f'Dataset with SOPInstanceUID {image.SOPInstanceUID} '
+                'is not a DICOM image.'
+            )
+        modality = _get_coded_modality(image.SOPClassUID)
+
modality_item = CodeContentItem(
- name=CodedConcept(
- value='121139',
- meaning='Modality',
- scheme_designator='DCM'
- ),
+ name=codes.DCM.Modality,
value=modality,
relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT
)
self.append(modality_item)
- frame_of_reference_uid_item = UIDRefContentItem(
- name=CodedConcept(
- value='112227',
- meaning='Frame of Reference UID',
- scheme_designator='DCM'
- ),
- value=frame_of_reference_uid,
- relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT
- )
- self.append(frame_of_reference_uid_item)
+ if 'FrameOfReferenceUID' in image:
+ frame_of_reference_uid_item = UIDRefContentItem(
+ name=codes.DCM.FrameOfReferenceUID,
+ value=image.FrameOfReferenceUID,
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT
+ )
+ self.append(frame_of_reference_uid_item)
pixel_data_rows_item = NumContentItem(
- name=CodedConcept(
- value='110910',
- meaning='Pixel Data Rows',
- scheme_designator='DCM'
- ),
- value=pixel_data_rows,
+ name=codes.DCM.PixelDataRows,
+ value=image.Rows,
relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
unit=CodedConcept(
value='{pixels}',
@@ -3707,12 +3838,8 @@ def __init__(
)
self.append(pixel_data_rows_item)
pixel_data_cols_item = NumContentItem(
- name=CodedConcept(
- value='110911',
- meaning='Pixel Data Columns',
- scheme_designator='DCM'
- ),
- value=pixel_data_columns,
+ name=codes.DCM.PixelDataColumns,
+ value=image.Columns,
relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
unit=CodedConcept(
value='{pixels}',
@@ -3721,6 +3848,16 @@ def __init__(
)
)
self.append(pixel_data_cols_item)
+
+ if self._is_cross_sectional(image):
+ modality_descriptors = \
+ self._generate_cross_sectional_descriptors(image)
+ self.extend(modality_descriptors)
+ elif self._is_projection_radiography(image):
+ modality_descriptors = \
+ self._generate_projection_radiography_descriptors(image)
+ self.extend(modality_descriptors)
+
if additional_descriptors is not None:
for item in additional_descriptors:
if not isinstance(item, ContentItem):
@@ -3732,6 +3869,188 @@ def __init__(
item.RelationshipType = relationship_type.value
self.append(item)
+ def _generate_projection_radiography_descriptors(
+ self,
+ dataset: Dataset
+ ) -> Sequence[ContentItem]:
+        """Generate descriptors for projection radiography modalities.
+
+        :dcm:`TID 1603 `
+        Image Library Entry Descriptors for Projection Radiography
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Metadata of a projection radiography image
+
+ Returns
+ -------
+ Sequence[highdicom.sr.ContentItem]
+ SR Content Items describing the image
+
+ """ # noqa: E501
+ patient_orientation = dataset.PatientOrientation
+ pixel_spacing = dataset.ImagerPixelSpacing
+ descriptors = [
+ TextContentItem(
+ name=codes.DCM.PatientOrientationRow,
+ value=patient_orientation[0],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ ),
+ TextContentItem(
+ name=codes.DCM.PatientOrientationColumn,
+ value=patient_orientation[1],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ ),
+ NumContentItem(
+ name=codes.DCM.HorizontalPixelSpacing,
+ value=pixel_spacing[1],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ ),
+ NumContentItem(
+ name=codes.DCM.VerticalPixelSpacing,
+ value=pixel_spacing[0],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ )]
+
+ return descriptors
+
+ def _generate_cross_sectional_descriptors(
+ self,
+ dataset: Dataset
+ ) -> Sequence[ContentItem]:
+ """Generate descriptors for cross-sectional modalities.
+
+ :dcm:`TID 1604 Image Library Entry Descriptors for Cross-Sectional Modalities `
+
+ Parameters
+ ----------
+ dataset: pydicom.Dataset
+ A pydicom Dataset of a cross-sectional image.
+
+ Returns
+ -------
+ Sequence[highdicom.sr.ContentItem]
+ SR Content Items describing the image.
+
+ """ # noqa: E501
+ pixel_spacing = dataset.PixelSpacing
+ image_orientation = dataset.ImageOrientationPatient
+ image_position = dataset.ImagePositionPatient
+
+ descriptors = [
+ NumContentItem(
+ name=codes.DCM.HorizontalPixelSpacing,
+ value=pixel_spacing[1],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ ),
+ NumContentItem(
+ name=codes.DCM.VerticalPixelSpacing,
+ value=pixel_spacing[0],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ ),
+ NumContentItem(
+ name=codes.DCM.SpacingBetweenSlices,
+ value=dataset.SpacingBetweenSlices,
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ ),
+ NumContentItem(
+ name=codes.DCM.SliceThickness,
+ value=dataset.SliceThickness,
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ ),
+ NumContentItem(
+ name=codes.DCM.ImagePositionPatientX,
+ value=image_position[0],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ ),
+ NumContentItem(
+ name=codes.DCM.ImagePositionPatientY,
+ value=image_position[1],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ ),
+ NumContentItem(
+ name=codes.DCM.ImagePositionPatientZ,
+ value=image_position[2],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=codes.UCUM.Millimeter
+ ),
+ NumContentItem(
+ name=codes.DCM.ImageOrientationPatientRowX,
+ value=image_orientation[0],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=CodedConcept(
+ value='{-1:1}',
+ meaning='{-1:1}',
+ scheme_designator='UCUM'
+ )
+ ),
+ NumContentItem(
+ name=codes.DCM.ImageOrientationPatientRowY,
+ value=image_orientation[1],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=CodedConcept(
+ value='{-1:1}',
+ meaning='{-1:1}',
+ scheme_designator='UCUM'
+ )
+ ),
+ NumContentItem(
+ name=codes.DCM.ImageOrientationPatientRowZ,
+ value=image_orientation[2],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=CodedConcept(
+ value='{-1:1}',
+ meaning='{-1:1}',
+ scheme_designator='UCUM'
+ )
+ ),
+ NumContentItem(
+ name=codes.DCM.ImageOrientationPatientColumnX,
+ value=image_orientation[3],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=CodedConcept(
+ value='{-1:1}',
+ meaning='{-1:1}',
+ scheme_designator='UCUM'
+ )
+ ),
+ NumContentItem(
+ name=codes.DCM.ImageOrientationPatientColumnY,
+ value=image_orientation[4],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=CodedConcept(
+ value='{-1:1}',
+ meaning='{-1:1}',
+ scheme_designator='UCUM'
+ )
+ ),
+ NumContentItem(
+ name=codes.DCM.ImageOrientationPatientColumnZ,
+ value=image_orientation[5],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT,
+ unit=CodedConcept(
+ value='{-1:1}',
+ meaning='{-1:1}',
+ scheme_designator='UCUM'
+ )
+ )
+ ]
+ return descriptors
+
+ def _is_cross_sectional(self, ds: Dataset) -> bool:
+ return ds.Modality in ['CT', 'MR', 'PT']
+
+ def _is_projection_radiography(self, ds: Dataset) -> bool:
+ return ds.Modality in ['CR', 'DX', 'IO', 'MG', 'PX', 'RF', 'RG', 'XA']
+
class MeasurementReport(Template):
@@ -3759,8 +4078,8 @@ def __init__(
language_of_content_item_and_descendants: Optional[
LanguageOfContentItemAndDescendants
] = None,
- image_library_groups: Optional[
- Sequence[ImageLibraryEntryDescriptors]
+ referenced_images: Optional[
+ Sequence[Dataset]
] = None
):
"""
@@ -3782,8 +4101,8 @@ def __init__(
language_of_content_item_and_descendants: Union[highdicom.sr.LanguageOfContentItemAndDescendants, None], optional
specification of the language of report content items
(defaults to English)
- image_library_groups: Union[Sequence[highdicom.sr.ImageLibraryEntry], None], optional
- Entry descriptors for each image library group
+ referenced_images: Union[Sequence[pydicom.Dataset], None], optional
+ Images that should be included in the library
""" # noqa: E501
if title is None:
@@ -3808,18 +4127,14 @@ def __init__(
procedure_reported = [procedure_reported]
for procedure in procedure_reported:
procedure_item = CodeContentItem(
- name=CodedConcept(
- value='121058',
- meaning='Procedure reported',
- scheme_designator='DCM',
- ),
+ name=codes.DCM.ProcedureReported,
value=procedure,
relationship_type=RelationshipTypeValues.HAS_CONCEPT_MOD
)
item.ContentSequence.append(procedure_item)
-
- image_library_item = ImageLibrary(image_library_groups)
- item.ContentSequence.extend(image_library_item)
+ if referenced_images:
+ image_library = ImageLibrary(referenced_images)
+ item.ContentSequence.extend(image_library)
measurements: Union[
MeasurementsAndQualitativeEvaluations,
@@ -3833,11 +4148,7 @@ def __init__(
MeasurementsAndQualitativeEvaluations,
)
container_item = ContainerContentItem(
- name=CodedConcept(
- value='126010',
- meaning='Imaging Measurements',
- scheme_designator='DCM'
- ),
+ name=codes.DCM.ImagingMeasurements,
relationship_type=RelationshipTypeValues.CONTAINS
)
container_item.ContentSequence = ContentSequence()
@@ -4625,51 +4936,105 @@ def get_image_measurement_groups(
return sequences
+class ImageLibraryEntry(Template):
+
+ """:dcm:`TID 1601 Image Library Entry `
+ """ # noqa: E501
+
+ def __init__(
+ self,
+ dataset: Dataset,
+ ) -> None:
+ """
+ Parameters
+ ----------
+ dataset: pydicom.dataset.Dataset
+ Image to include in image library
+
+ """
+ super().__init__()
+
+ library_item_entry = ImageLibraryEntryDescriptors(dataset)
+ group_item = ContainerContentItem(
+ name=codes.DCM.ImageLibraryGroup,
+ relationship_type=RelationshipTypeValues.CONTAINS
+ )
+
+ group_item.ContentSequence = library_item_entry
+ self.append(group_item)
+
+
class ImageLibrary(Template):
- """:dcm:`TID 1600 ` Image Library"""
+ """:dcm:`TID 1600 Image Library `
+ """ # noqa: E501
def __init__(
self,
- groups: Optional[Sequence[ImageLibraryEntryDescriptors]] = None
+ datasets: Sequence[Dataset]
) -> None:
"""
Parameters
----------
- groups: Union[Sequence[Sequence[highdicom.sr.ImageLibraryEntryDescriptors]], None], optional
- Entry descriptors for each image library group
+ datasets: Sequence[pydicom.dataset.Dataset]
+            Image datasets to include in the image library. Passing a
+            non-image object raises an exception.
- """ # noqa: E501
+ """
super().__init__()
library_item = ContainerContentItem(
- name=CodedConcept(
- value='111028',
- meaning='Image Library',
- scheme_designator='DCM'
- ),
+ name=codes.DCM.ImageLibrary,
relationship_type=RelationshipTypeValues.CONTAINS
)
- content = ContentSequence()
- if groups is not None:
- for descriptor_items in groups:
- group_item = ContainerContentItem(
+ library_item.ContentSequence = ContentSequence()
+ if datasets is not None:
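+            # Group images that share acquisition context: the coded modality
+            # and, where present, the Frame of Reference UID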
+ groups = collections.defaultdict(list)
+ for ds in datasets:
+ modality = _get_coded_modality(ds.SOPClassUID)
+ image_item = ImageContentItem(
name=CodedConcept(
- value='126200',
- meaning='Image Library Group',
- scheme_designator='DCM'
+ value='260753009',
+ meaning='Source',
+ scheme_designator='SCT'
),
+ referenced_sop_instance_uid=ds.SOPInstanceUID,
+ referenced_sop_class_uid=ds.SOPClassUID,
relationship_type=RelationshipTypeValues.CONTAINS
)
- group_item.ContentSequence = descriptor_items
- # The Image Library Entry template contains the individual
- # Image Library Entry Descriptors content items.
- if not isinstance(descriptor_items,
- ImageLibraryEntryDescriptors):
- raise TypeError(
- 'Image library group items must have type '
- '"ImageLibraryEntry".'
+ descriptors = ImageLibraryEntryDescriptors(ds)
+
+ image_item.ContentSequence = ContentSequence()
+ image_item.ContentSequence.extend(descriptors)
+ if 'FrameOfReferenceUID' in ds:
+ # Only type 1 attributes
+ shared_descriptors = (
+ modality,
+ ds.FrameOfReferenceUID,
)
- content.append(group_item)
- if len(content) > 0:
- library_item.ContentSequence = content
+ else:
+ shared_descriptors = (
+ modality,
+ )
+ groups[shared_descriptors].append(image_item)
+
+            for shared_descriptors, image_items in groups.items():
+                group_item = ContainerContentItem(
+                    name=codes.DCM.ImageLibraryGroup,
+                    relationship_type=RelationshipTypeValues.CONTAINS
+                )
+                group_item.ContentSequence = ContentSequence()
+
+                # The Frame of Reference UID, if present, is part of the
+                # shared descriptors that form the grouping key.
+                if len(shared_descriptors) > 1:
+ group_item.ContentSequence.append(
+ UIDRefContentItem(
+ name=codes.DCM.FrameOfReferenceUID,
+ value=shared_descriptors[1],
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT # noqa: E501
+ )
+ )
+ group_item.ContentSequence.extend(image_items)
+                if len(group_item.ContentSequence) > 0:
+ library_item.ContentSequence.append(group_item)
+
self.append(library_item)
diff --git a/src/highdicom/version.py b/src/highdicom/version.py
index 949f02c2..2f15b8cd 100644
--- a/src/highdicom/version.py
+++ b/src/highdicom/version.py
@@ -1 +1 @@
-__version__ = '0.18.4'
+__version__ = '0.20.0'
diff --git a/tests/test_ann.py b/tests/test_ann.py
index 39b39c1c..fe587934 100644
--- a/tests/test_ann.py
+++ b/tests/test_ann.py
@@ -8,6 +8,7 @@
from pydicom.filereader import dcmread
from pydicom.sr.codedict import codes
from pydicom.sr.coding import Code
+from pydicom.uid import VLWholeSlideMicroscopyImageStorage
from highdicom.ann.content import Measurements, AnnotationGroup
from highdicom.ann.enum import (
@@ -16,7 +17,10 @@
GraphicTypeValues,
)
from highdicom.ann.sop import MicroscopyBulkSimpleAnnotations
-from highdicom.content import AlgorithmIdentificationSequence
+from highdicom.content import (
+ AlgorithmIdentificationSequence,
+ ReferencedImageSequence,
+)
from highdicom.sr.coding import CodedConcept
from highdicom.uid import UID
@@ -61,6 +65,26 @@ def test_construction(self):
values[stored_indices]
)
+ def test_construction_with_referenced_image(self):
+ optical_path_item = Dataset()
+ optical_path_item.OpticalPathIdentifier = '1'
+ image = Dataset()
+ image.SOPInstanceUID = '1.2.3.4'
+ image.SOPClassUID = VLWholeSlideMicroscopyImageStorage
+ image.OpticalPathSequence = [optical_path_item]
+
+ measurements = Measurements(
+ name=Code('Q4LE', 'SBSI', 'Mean intensity'),
+ values=np.ones((10, ), dtype=np.float32),
+ unit=Code('{counts}', 'UCUM', 'Counts'),
+ referenced_images=ReferencedImageSequence(
+ referenced_images=[image],
+ referenced_optical_path_identifier='1'
+ )
+ )
+ assert hasattr(measurements, 'ReferencedImageSequence')
+ assert len(measurements.ReferencedImageSequence) == 1
+
def test_construction_missing_name(self):
with pytest.raises(TypeError):
Measurements(
@@ -100,12 +124,19 @@ def test_alternative_construction_from_dataset(self):
measurement_values.FloatingPointValues = values.tobytes()
measurement_values.AnnotationIndexList = index.tobytes()
dataset.MeasurementValuesSequence = [measurement_values]
+ referenced_image = Dataset()
+ referenced_image.ReferencedOpticalPathIdentifier = '1'
+ referenced_image.ReferencedSOPInstanceUID = '1.2.3'
+ referenced_image.ReferencedSOPClassUID = \
+ VLWholeSlideMicroscopyImageStorage
+ dataset.ReferencedImageSequence = [referenced_image]
measurements = Measurements.from_dataset(dataset)
assert measurements.name == CodedConcept.from_dataset(name)
assert measurements.unit == CodedConcept.from_dataset(unit)
np.testing.assert_allclose(measurements.get_values(3), values)
+ assert len(measurements.referenced_images) == 1
class TestAnnotationGroup(unittest.TestCase):
@@ -202,7 +233,7 @@ def test_construction(self):
graphic_data[1]
)
- names, values, units = group.get_measurements()
+ names, values, units, ref_images = group.get_measurements()
assert len(names) == 1
assert names[0] == measurement_names[0]
assert len(units) == 1
@@ -210,8 +241,10 @@ def test_construction(self):
assert values.dtype == np.float32
assert values.shape == (2, 1)
np.testing.assert_allclose(values, measurement_values)
+ assert len(ref_images) == 1
+ assert ref_images[0] is None
- names, values, units = group.get_measurements(
+ names, values, units, ref_images = group.get_measurements(
name=measurement_names[0]
)
assert len(names) == 1
@@ -221,8 +254,10 @@ def test_construction(self):
assert values.dtype == np.float32
assert values.shape == (2, 1)
np.testing.assert_allclose(values, measurement_values)
+ assert len(ref_images) == 1
+ assert ref_images[0] is None
- names, values, units = group.get_measurements(
+ names, values, units, ref_images = group.get_measurements(
name=codes.SCT.Volume
)
assert names == []
@@ -230,6 +265,7 @@ def test_construction(self):
assert values.size == 0
assert values.dtype == np.float32
assert values.shape == (2, 0)
+ assert ref_images == []
def test_alternative_construction_from_dataset(self):
coordinates_data = np.array(
@@ -282,7 +318,7 @@ def test_alternative_construction_from_dataset(self):
np.array([[1.0, 1.0]], dtype=np.double)
)
- names, values, units = group.get_measurements()
+ names, values, units, ref_images = group.get_measurements()
assert names == []
assert units == []
assert values.size == 0
diff --git a/tests/test_content.py b/tests/test_content.py
index f28568f9..cf64cac3 100644
--- a/tests/test_content.py
+++ b/tests/test_content.py
@@ -742,9 +742,8 @@ def setUp(self):
for f in get_testdata_files('dicomdirtests/77654033/CT2/*')
]
self._ct_multiframe = dcmread(get_testdata_file('eCT_Supplemental.dcm'))
- self._seg = dcmread(
- 'data/test_files/seg_image_ct_binary_overlap.dcm'
- )
+ self._sm = dcmread('data/test_files/sm_image.dcm')
+ self._seg = dcmread('data/test_files/seg_image_ct_binary_overlap.dcm')
def test_construction_ref_ims(self):
ref_ims = ReferencedImageSequence(
@@ -812,9 +811,9 @@ def test_construction_segment_number(self):
assert ref_ims[0].ReferencedSegmentNumber == 1
def test_construction_segment_number_non_seg(self):
- with pytest.raises(ValueError):
+ with pytest.raises(TypeError):
ReferencedImageSequence(
- referenced_images=self._ct_series,
+ referenced_images=self._ct_series[0],
referenced_segment_number=1
)
@@ -873,6 +872,37 @@ def test_construction_duplicate(self):
referenced_images=self._ct_series * 2,
)
+ def test_construction_optical_path_identifier(self):
+ ref_ims = ReferencedImageSequence(
+ referenced_images=[self._sm],
+ referenced_optical_path_identifier='1'
+ )
+ assert len(ref_ims) == 1
+ assert ref_ims[0].ReferencedOpticalPathIdentifier == '1'
+
+ def test_construction_optical_path_identifier_invalid_reference(self):
+ with pytest.raises(ValueError):
+ ReferencedImageSequence(
+ referenced_images=[self._sm],
+ referenced_optical_path_identifier='20'
+ )
+
+ def test_construction_optical_path_identifier_non_sm(self):
+ with pytest.raises(TypeError):
+ ReferencedImageSequence(
+ referenced_images=[self._seg],
+ referenced_optical_path_identifier='1'
+ )
+
+ def test_construction_optical_path_identifier_and_frame_numbers(self):
+ ref_ims = ReferencedImageSequence(
+ referenced_images=[self._sm],
+ referenced_optical_path_identifier='1',
+ referenced_frame_number=[1, 2],
+ )
+ assert len(ref_ims) == 1
+ assert ref_ims[0].ReferencedOpticalPathIdentifier == '1'
+
class TestPaletteColorLUT(TestCase):
diff --git a/tests/test_frame.py b/tests/test_frame.py
index c10b1417..2e4d424d 100644
--- a/tests/test_frame.py
+++ b/tests/test_frame.py
@@ -215,6 +215,7 @@ def test_jpeg2000_monochrome(self):
np.testing.assert_array_equal(frame, decoded_frame)
def test_jpegls_rgb(self):
+ pytest.importorskip("libjpeg")
bits_allocated = 8
frame = np.ones((16, 32, 3), dtype=np.dtype(f'uint{bits_allocated}'))
frame *= 255
@@ -244,6 +245,7 @@ def test_jpegls_rgb(self):
np.testing.assert_array_equal(frame, decoded_frame)
def test_jpegls_monochrome(self):
+ pytest.importorskip("libjpeg")
bits_allocated = 16
frame = np.zeros((16, 32), dtype=np.dtype(f'uint{bits_allocated}'))
compressed_frame = encode_frame(
diff --git a/tests/test_pm.py b/tests/test_pm.py
index f4b504ef..aa297da4 100644
--- a/tests/test_pm.py
+++ b/tests/test_pm.py
@@ -584,6 +584,7 @@ def test_multi_frame_sm_image_ushort_encapsulated_jpeg2000(self):
assert np.array_equal(pmap.pixel_array, pixel_array)
def test_multi_frame_sm_image_ushort_encapsulated_jpegls(self):
+ pytest.importorskip("libjpeg")
pixel_array = np.random.randint(
low=0,
high=2**8,
diff --git a/tests/test_sc.py b/tests/test_sc.py
index ab3eab49..72e6636e 100644
--- a/tests/test_sc.py
+++ b/tests/test_sc.py
@@ -428,6 +428,7 @@ def test_rgb_jpeg2000(self):
)
def test_monochrome_jpegls(self):
+ pytest.importorskip("libjpeg")
bits_allocated = 16
photometric_interpretation = 'MONOCHROME2'
coordinate_system = 'PATIENT'
@@ -455,6 +456,7 @@ def test_monochrome_jpegls(self):
)
def test_rgb_jpegls(self):
+ pytest.importorskip("libjpeg")
bits_allocated = 8
photometric_interpretation = 'YBR_FULL'
coordinate_system = 'PATIENT'
diff --git a/tests/test_seg.py b/tests/test_seg.py
index c7547ba8..f2712a0b 100644
--- a/tests/test_seg.py
+++ b/tests/test_seg.py
@@ -554,10 +554,10 @@ def test_construction_2(self):
assert seq[5].FunctionalGroupPointer == 0x0048021A
-class TestSegmentation(unittest.TestCase):
+class TestSegmentation:
+ @pytest.fixture(autouse=True)
def setUp(self):
- super().setUp()
file_path = Path(__file__)
data_dir = file_path.parent.parent.joinpath('data')
self._segmented_property_category = \
@@ -683,6 +683,47 @@ def setUp(self):
)
self._ct_multiframe_mask_array[:, 100:200, 200:400] = True
+ self._tests = {
+ 'ct-image': ([self._ct_image], self._ct_pixel_array),
+ 'sm-image': ([self._sm_image], self._sm_pixel_array),
+ 'ct-series': (self._ct_series, self._ct_series_mask_array),
+ 'ct-multiframe': (
+ [self._ct_multiframe], self._ct_multiframe_mask_array
+ ),
+ }
+
+ # Fixtures to use to parametrize segmentation creation
+ # Using this fixture mechanism, we can parametrize class methods
+ @staticmethod
+ @pytest.fixture(params=[ExplicitVRLittleEndian, ImplicitVRLittleEndian])
+ def binary_transfer_syntax_uid(request):
+ return request.param
+
+ @staticmethod
+ @pytest.fixture(
+ params=[
+ ExplicitVRLittleEndian,
+ ImplicitVRLittleEndian,
+ RLELossless,
+ JPEG2000Lossless,
+ JPEGLSLossless,
+ ]
+ )
+ def fractional_transfer_syntax_uid(request):
+ return request.param
+
+ @staticmethod
+ @pytest.fixture(params=[np.bool_, np.uint8, np.uint16, np.float_])
+ def pix_type(request):
+ return request.param
+
+ @staticmethod
+ @pytest.fixture(
+ params=['ct-image', 'sm-image', 'ct-series', 'ct-multiframe'],
+ )
+ def test_data(request):
+ return request.param
+
@staticmethod
def sort_frames(sources, mask):
src = sources[0]
@@ -1365,325 +1406,313 @@ def test_construction_7(self):
assert SegmentsOverlapValues[instance.SegmentsOverlap] == \
SegmentsOverlapValues.NO
- def test_pixel_types(self):
- # A series of tests on different types of image
- tests = [
- ([self._ct_image], self._ct_pixel_array),
- ([self._sm_image], self._sm_pixel_array),
- (self._ct_series, self._ct_series_mask_array),
- ([self._ct_multiframe], self._ct_multiframe_mask_array),
- ]
+ def test_pixel_types_fractional(
+ self,
+ fractional_transfer_syntax_uid,
+ pix_type,
+ test_data,
+ ):
+ if fractional_transfer_syntax_uid == JPEGLSLossless:
+ pytest.importorskip("libjpeg")
+
+ sources, mask = self._tests[test_data]
+
+ # Two segments, overlapping
+ multi_segment_overlap = np.stack([mask, mask], axis=-1)
+ if multi_segment_overlap.ndim == 3:
+ multi_segment_overlap = multi_segment_overlap[np.newaxis, ...]
+
+ # Two segments non-overlapping
+ multi_segment_exc = np.stack([mask, 1 - mask], axis=-1)
+ if multi_segment_exc.ndim == 3:
+ multi_segment_exc = multi_segment_exc[np.newaxis, ...]
+ additional_mask = 1 - mask
+
+ # Find the expected encodings for the masks
+ if mask.ndim > 2:
+ # Expected encoding of the mask
+ expected_encoding = self.sort_frames(
+ sources,
+ mask
+ )
+ expected_encoding = self.remove_empty_frames(
+ expected_encoding
+ )
- for sources, mask in tests:
-
- # Two segments, overlapping
- multi_segment_overlap = np.stack([mask, mask], axis=-1)
- if multi_segment_overlap.ndim == 3:
- multi_segment_overlap = multi_segment_overlap[np.newaxis, ...]
-
- # Two segments non-overlapping
- multi_segment_exc = np.stack([mask, 1 - mask], axis=-1)
- if multi_segment_exc.ndim == 3:
- multi_segment_exc = multi_segment_exc[np.newaxis, ...]
- additional_mask = 1 - mask
-
- # Find the expected encodings for the masks
- if mask.ndim > 2:
- # Expected encoding of the mask
- expected_encoding = self.sort_frames(
- sources,
- mask
- )
- expected_encoding = self.remove_empty_frames(
- expected_encoding
- )
+ # Expected encoding of the complement
+ expected_encoding_comp = self.sort_frames(
+ sources,
+ additional_mask
+ )
+ expected_encoding_comp = self.remove_empty_frames(
+ expected_encoding_comp
+ )
- # Expected encoding of the complement
- expected_encoding_comp = self.sort_frames(
- sources,
- additional_mask
- )
- expected_encoding_comp = self.remove_empty_frames(
- expected_encoding_comp
- )
+ # Expected encoding of the multi segment arrays
+ expected_enc_overlap = np.concatenate(
+ [expected_encoding, expected_encoding],
+ axis=0
+ )
+ expected_enc_exc = np.concatenate(
+ [expected_encoding, expected_encoding_comp],
+ axis=0
+ )
+ expected_encoding = expected_encoding.squeeze()
+ else:
+ expected_encoding = mask
- # Expected encoding of the multi segment arrays
- expected_enc_overlap = np.concatenate(
- [expected_encoding, expected_encoding],
- axis=0
- )
- expected_enc_exc = np.concatenate(
- [expected_encoding, expected_encoding_comp],
- axis=0
- )
- expected_encoding = expected_encoding.squeeze()
- else:
- expected_encoding = mask
+ # Expected encoding of the multi segment arrays
+ expected_enc_overlap = np.stack(
+ [expected_encoding, expected_encoding],
+ axis=0
+ )
+ expected_enc_exc = np.stack(
+ [expected_encoding, 1 - expected_encoding],
+ axis=0
+ )
- # Expected encoding of the multi segment arrays
- expected_enc_overlap = np.stack(
- [expected_encoding, expected_encoding],
- axis=0
- )
- expected_enc_exc = np.stack(
- [expected_encoding, 1 - expected_encoding],
- axis=0
- )
+ max_fractional_value = 255
+ instance = Segmentation(
+ sources,
+ mask.astype(pix_type),
+ SegmentationTypeValues.FRACTIONAL.value,
+ self._segment_descriptions,
+ self._series_instance_uid,
+ self._series_number,
+ self._sop_instance_uid,
+ self._instance_number,
+ self._manufacturer,
+ self._manufacturer_model_name,
+ self._software_versions,
+ self._device_serial_number,
+ max_fractional_value=max_fractional_value,
+ transfer_syntax_uid=fractional_transfer_syntax_uid
+ )
- # Test instance creation for different pixel types and transfer
- # syntaxes
- valid_transfer_syntaxes = [
- ExplicitVRLittleEndian,
- ImplicitVRLittleEndian,
- RLELossless,
- JPEG2000Lossless,
- JPEGLSLossless,
- ]
-
- max_fractional_value = 255
- for transfer_syntax_uid in valid_transfer_syntaxes:
- for pix_type in [np.bool_, np.uint8, np.uint16, np.float_]:
- instance = Segmentation(
- sources,
- mask.astype(pix_type),
- SegmentationTypeValues.FRACTIONAL.value,
- self._segment_descriptions,
- self._series_instance_uid,
- self._series_number,
- self._sop_instance_uid,
- self._instance_number,
- self._manufacturer,
- self._manufacturer_model_name,
- self._software_versions,
- self._device_serial_number,
- max_fractional_value=max_fractional_value,
- transfer_syntax_uid=transfer_syntax_uid
- )
-
- # Ensure the recovered pixel array matches what is expected
- if pix_type in (np.bool_, np.float_):
- assert np.array_equal(
- self.get_array_after_writing(instance),
- expected_encoding * max_fractional_value
- ), f'{sources[0].Modality} {transfer_syntax_uid}'
- else:
- assert np.array_equal(
- self.get_array_after_writing(instance),
- expected_encoding
- ), f'{sources[0].Modality} {transfer_syntax_uid}'
- self.check_dimension_index_vals(instance)
-
- # Multi-segment (exclusive)
- instance = Segmentation(
- sources,
- multi_segment_exc.astype(pix_type),
- SegmentationTypeValues.FRACTIONAL.value,
- self._both_segment_descriptions,
- self._series_instance_uid,
- self._series_number,
- self._sop_instance_uid,
- self._instance_number,
- self._manufacturer,
- self._manufacturer_model_name,
- self._software_versions,
- self._device_serial_number,
- max_fractional_value=1,
- transfer_syntax_uid=transfer_syntax_uid
- )
- if pix_type == np.float_:
- assert (
- instance.SegmentsOverlap ==
- SegmentsOverlapValues.UNDEFINED.value
- )
- else:
- assert (
- instance.SegmentsOverlap ==
- SegmentsOverlapValues.NO.value
- )
-
- assert np.array_equal(
- self.get_array_after_writing(instance),
- expected_enc_exc
- ), f'{sources[0].Modality} {transfer_syntax_uid}'
- self.check_dimension_index_vals(instance)
-
- # Multi-segment (overlapping)
- instance = Segmentation(
- sources,
- multi_segment_overlap.astype(pix_type),
- SegmentationTypeValues.FRACTIONAL.value,
- self._both_segment_descriptions,
- self._series_instance_uid,
- self._series_number,
- self._sop_instance_uid,
- self._instance_number,
- self._manufacturer,
- self._manufacturer_model_name,
- self._software_versions,
- self._device_serial_number,
- max_fractional_value=1,
- transfer_syntax_uid=transfer_syntax_uid
- )
- if pix_type == np.float_:
- assert (
- instance.SegmentsOverlap ==
- SegmentsOverlapValues.UNDEFINED.value
- )
- else:
- assert (
- instance.SegmentsOverlap ==
- SegmentsOverlapValues.YES.value
- )
-
- assert np.array_equal(
- self.get_array_after_writing(instance),
- expected_enc_overlap
- ), f'{sources[0].Modality} {transfer_syntax_uid}'
- self.check_dimension_index_vals(instance)
-
- for sources, mask in tests:
- # Two segments, overlapping
- multi_segment_overlap = np.stack([mask, mask], axis=-1)
- if multi_segment_overlap.ndim == 3:
- multi_segment_overlap = multi_segment_overlap[np.newaxis, ...]
-
- # Two segments non-overlapping
- multi_segment_exc = np.stack([mask, 1 - mask], axis=-1)
-
- if multi_segment_exc.ndim == 3:
- multi_segment_exc = multi_segment_exc[np.newaxis, ...]
- additional_mask = 1 - mask
-
- additional_mask = (1 - mask)
- # Find the expected encodings for the masks
- if mask.ndim > 2:
- # Expected encoding of the mask
- expected_encoding = self.sort_frames(
- sources,
- mask
- )
- expected_encoding = self.remove_empty_frames(
- expected_encoding
- )
+ # Ensure the recovered pixel array matches what is expected
+ if pix_type in (np.bool_, np.float_):
+ assert np.array_equal(
+ self.get_array_after_writing(instance),
+ expected_encoding * max_fractional_value
+ ), f'{sources[0].Modality} {fractional_transfer_syntax_uid}'
+ else:
+ assert np.array_equal(
+ self.get_array_after_writing(instance),
+ expected_encoding
+ ), f'{sources[0].Modality} {fractional_transfer_syntax_uid}'
+ self.check_dimension_index_vals(instance)
- # Expected encoding of the complement
- expected_encoding_comp = self.sort_frames(
- sources,
- additional_mask
- )
- expected_encoding_comp = self.remove_empty_frames(
- expected_encoding_comp
- )
+ # Multi-segment (exclusive)
+ instance = Segmentation(
+ sources,
+ multi_segment_exc.astype(pix_type),
+ SegmentationTypeValues.FRACTIONAL.value,
+ self._both_segment_descriptions,
+ self._series_instance_uid,
+ self._series_number,
+ self._sop_instance_uid,
+ self._instance_number,
+ self._manufacturer,
+ self._manufacturer_model_name,
+ self._software_versions,
+ self._device_serial_number,
+ max_fractional_value=1,
+ transfer_syntax_uid=fractional_transfer_syntax_uid
+ )
+ if pix_type == np.float_:
+ assert (
+ instance.SegmentsOverlap ==
+ SegmentsOverlapValues.UNDEFINED.value
+ )
+ else:
+ assert (
+ instance.SegmentsOverlap ==
+ SegmentsOverlapValues.NO.value
+ )
- # Expected encoding of the multi segment arrays
- expected_enc_overlap = np.concatenate(
- [expected_encoding, expected_encoding],
- axis=0
- )
- expected_enc_exc = np.concatenate(
- [expected_encoding, expected_encoding_comp],
- axis=0
- )
- expected_encoding = expected_encoding.squeeze()
- else:
- expected_encoding = mask
+ assert np.array_equal(
+ self.get_array_after_writing(instance),
+ expected_enc_exc
+ ), f'{sources[0].Modality} {fractional_transfer_syntax_uid}'
+ self.check_dimension_index_vals(instance)
- # Expected encoding of the multi segment arrays
- expected_enc_overlap = np.stack(
- [expected_encoding, expected_encoding],
- axis=0
- )
- expected_enc_exc = np.stack(
- [expected_encoding, 1 - expected_encoding],
- axis=0
- )
+ # Multi-segment (overlapping)
+ instance = Segmentation(
+ sources,
+ multi_segment_overlap.astype(pix_type),
+ SegmentationTypeValues.FRACTIONAL.value,
+ self._both_segment_descriptions,
+ self._series_instance_uid,
+ self._series_number,
+ self._sop_instance_uid,
+ self._instance_number,
+ self._manufacturer,
+ self._manufacturer_model_name,
+ self._software_versions,
+ self._device_serial_number,
+ max_fractional_value=1,
+ transfer_syntax_uid=fractional_transfer_syntax_uid
+ )
+ if pix_type == np.float_:
+ assert (
+ instance.SegmentsOverlap ==
+ SegmentsOverlapValues.UNDEFINED.value
+ )
+ else:
+ assert (
+ instance.SegmentsOverlap ==
+ SegmentsOverlapValues.YES.value
+ )
- valid_transfer_syntaxes = [
- ExplicitVRLittleEndian,
- ImplicitVRLittleEndian,
- ]
-
- for transfer_syntax_uid in valid_transfer_syntaxes:
- for pix_type in [np.bool_, np.uint8, np.uint16, np.float_]:
- instance = Segmentation(
- sources,
- mask.astype(pix_type),
- SegmentationTypeValues.BINARY.value,
- self._segment_descriptions,
- self._series_instance_uid,
- self._series_number,
- self._sop_instance_uid,
- self._instance_number,
- self._manufacturer,
- self._manufacturer_model_name,
- self._software_versions,
- self._device_serial_number,
- max_fractional_value=1,
- transfer_syntax_uid=transfer_syntax_uid
- )
-
- # Ensure the recovered pixel array matches what is expected
- assert np.array_equal(
- self.get_array_after_writing(instance),
- expected_encoding
- ), f'{sources[0].Modality} {transfer_syntax_uid}'
- self.check_dimension_index_vals(instance)
-
- # Multi-segment (exclusive)
- instance = Segmentation(
- sources,
- multi_segment_exc.astype(pix_type),
- SegmentationTypeValues.BINARY.value,
- self._both_segment_descriptions,
- self._series_instance_uid,
- self._series_number,
- self._sop_instance_uid,
- self._instance_number,
- self._manufacturer,
- self._manufacturer_model_name,
- self._software_versions,
- self._device_serial_number,
- max_fractional_value=1,
- transfer_syntax_uid=transfer_syntax_uid
- )
- assert (
- instance.SegmentsOverlap ==
- SegmentsOverlapValues.NO.value
- )
-
- assert np.array_equal(
- self.get_array_after_writing(instance),
- expected_enc_exc
- ), f'{sources[0].Modality} {transfer_syntax_uid}'
- self.check_dimension_index_vals(instance)
-
- # Multi-segment (overlapping)
- instance = Segmentation(
- sources,
- multi_segment_overlap.astype(pix_type),
- SegmentationTypeValues.BINARY.value,
- self._both_segment_descriptions,
- self._series_instance_uid,
- self._series_number,
- self._sop_instance_uid,
- self._instance_number,
- self._manufacturer,
- self._manufacturer_model_name,
- self._software_versions,
- self._device_serial_number,
- max_fractional_value=1,
- transfer_syntax_uid=transfer_syntax_uid
- )
- assert (
- instance.SegmentsOverlap ==
- SegmentsOverlapValues.YES.value
- )
-
- assert np.array_equal(
- self.get_array_after_writing(instance),
- expected_enc_overlap
- ), f'{sources[0].Modality} {transfer_syntax_uid}'
- self.check_dimension_index_vals(instance)
+ assert np.array_equal(
+ self.get_array_after_writing(instance),
+ expected_enc_overlap
+ ), f'{sources[0].Modality} {fractional_transfer_syntax_uid}'
+ self.check_dimension_index_vals(instance)
+
+ def test_pixel_types_binary(
+ self,
+ binary_transfer_syntax_uid,
+ pix_type,
+ test_data,
+ ):
+ sources, mask = self._tests[test_data]
+
+ # Two segments, overlapping
+ multi_segment_overlap = np.stack([mask, mask], axis=-1)
+ if multi_segment_overlap.ndim == 3:
+ multi_segment_overlap = multi_segment_overlap[np.newaxis, ...]
+
+ # Two segments non-overlapping
+        multi_segment_exc = np.stack([mask, 1 - mask], axis=-1)
+        if multi_segment_exc.ndim == 3:
+            multi_segment_exc = multi_segment_exc[np.newaxis, ...]
+        additional_mask = 1 - mask
+
+ # Find the expected encodings for the masks
+ if mask.ndim > 2:
+ # Expected encoding of the mask
+ expected_encoding = self.sort_frames(
+ sources,
+ mask
+ )
+ expected_encoding = self.remove_empty_frames(
+ expected_encoding
+ )
+
+ # Expected encoding of the complement
+ expected_encoding_comp = self.sort_frames(
+ sources,
+ additional_mask
+ )
+ expected_encoding_comp = self.remove_empty_frames(
+ expected_encoding_comp
+ )
+
+ # Expected encoding of the multi segment arrays
+ expected_enc_overlap = np.concatenate(
+ [expected_encoding, expected_encoding],
+ axis=0
+ )
+ expected_enc_exc = np.concatenate(
+ [expected_encoding, expected_encoding_comp],
+ axis=0
+ )
+ expected_encoding = expected_encoding.squeeze()
+ else:
+ expected_encoding = mask
+
+ # Expected encoding of the multi segment arrays
+ expected_enc_overlap = np.stack(
+ [expected_encoding, expected_encoding],
+ axis=0
+ )
+ expected_enc_exc = np.stack(
+ [expected_encoding, 1 - expected_encoding],
+ axis=0
+ )
+
+ instance = Segmentation(
+ sources,
+ mask.astype(pix_type),
+ SegmentationTypeValues.BINARY.value,
+ self._segment_descriptions,
+ self._series_instance_uid,
+ self._series_number,
+ self._sop_instance_uid,
+ self._instance_number,
+ self._manufacturer,
+ self._manufacturer_model_name,
+ self._software_versions,
+ self._device_serial_number,
+ max_fractional_value=1,
+ transfer_syntax_uid=binary_transfer_syntax_uid
+ )
+
+ # Ensure the recovered pixel array matches what is expected
+ assert np.array_equal(
+ self.get_array_after_writing(instance),
+ expected_encoding
+ ), f'{sources[0].Modality} {binary_transfer_syntax_uid}'
+ self.check_dimension_index_vals(instance)
+
+ # Multi-segment (exclusive)
+ instance = Segmentation(
+ sources,
+ multi_segment_exc.astype(pix_type),
+ SegmentationTypeValues.BINARY.value,
+ self._both_segment_descriptions,
+ self._series_instance_uid,
+ self._series_number,
+ self._sop_instance_uid,
+ self._instance_number,
+ self._manufacturer,
+ self._manufacturer_model_name,
+ self._software_versions,
+ self._device_serial_number,
+ max_fractional_value=1,
+ transfer_syntax_uid=binary_transfer_syntax_uid
+ )
+ assert (
+ instance.SegmentsOverlap ==
+ SegmentsOverlapValues.NO.value
+ )
+
+ assert np.array_equal(
+ self.get_array_after_writing(instance),
+ expected_enc_exc
+ ), f'{sources[0].Modality} {binary_transfer_syntax_uid}'
+ self.check_dimension_index_vals(instance)
+
+ # Multi-segment (overlapping)
+ instance = Segmentation(
+ sources,
+ multi_segment_overlap.astype(pix_type),
+ SegmentationTypeValues.BINARY.value,
+ self._both_segment_descriptions,
+ self._series_instance_uid,
+ self._series_number,
+ self._sop_instance_uid,
+ self._instance_number,
+ self._manufacturer,
+ self._manufacturer_model_name,
+ self._software_versions,
+ self._device_serial_number,
+ max_fractional_value=1,
+ transfer_syntax_uid=binary_transfer_syntax_uid
+ )
+ assert (
+ instance.SegmentsOverlap ==
+ SegmentsOverlapValues.YES.value
+ )
+
+ assert np.array_equal(
+ self.get_array_after_writing(instance),
+ expected_enc_overlap
+ ), f'{sources[0].Modality} {binary_transfer_syntax_uid}'
+ self.check_dimension_index_vals(instance)
def test_odd_number_pixels(self):
# Test that an image with an odd number of pixels per frame is encoded
@@ -1841,6 +1870,51 @@ def test_construction_empty_source_image(self):
device_serial_number=self._device_serial_number
)
+ def test_construction_empty_source_seg_sparse(self):
+        # Encoding an empty segmentation with omit_empty_frames=True should
+        # issue a warning and encode the full (all-background) segmentation
+ empty_pixel_array = np.zeros_like(self._ct_pixel_array)
+ seg = Segmentation(
+ source_images=[self._ct_image],
+ pixel_array=empty_pixel_array,
+ segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
+ segment_descriptions=(
+ self._segment_descriptions
+ ),
+ series_instance_uid=self._series_instance_uid,
+ series_number=self._series_number,
+ sop_instance_uid=self._sop_instance_uid,
+ instance_number=self._instance_number,
+ manufacturer=self._manufacturer,
+ manufacturer_model_name=self._manufacturer_model_name,
+ software_versions=self._software_versions,
+ device_serial_number=self._device_serial_number,
+ omit_empty_frames=True,
+ )
+
+ assert seg.pixel_array.shape == empty_pixel_array.shape
+
+ def test_construction_empty_seg_image(self):
+ # Can encode an empty segmentation with omit_empty_frames=False
+ empty_pixel_array = np.zeros_like(self._ct_pixel_array)
+ Segmentation(
+ source_images=[self._ct_image],
+ pixel_array=empty_pixel_array,
+ segmentation_type=SegmentationTypeValues.FRACTIONAL.value,
+ segment_descriptions=(
+ self._segment_descriptions
+ ),
+ series_instance_uid=self._series_instance_uid,
+ series_number=self._series_number,
+ sop_instance_uid=self._sop_instance_uid,
+ instance_number=self._instance_number,
+ manufacturer=self._manufacturer,
+ manufacturer_model_name=self._manufacturer_model_name,
+ software_versions=self._software_versions,
+ device_serial_number=self._device_serial_number,
+ omit_empty_frames=False,
+ )
+
def test_construction_invalid_content_label(self):
with pytest.raises(ValueError):
Segmentation(
diff --git a/tests/test_sr.py b/tests/test_sr.py
index a49fcb29..956d36e4 100644
--- a/tests/test_sr.py
+++ b/tests/test_sr.py
@@ -3075,6 +3075,11 @@ class TestMeasurementReport(unittest.TestCase):
def setUp(self):
super().setUp()
+ file_path = Path(__file__)
+ data_dir = file_path.parent.parent.joinpath('data')
+ self._ref_dataset = dcmread(
+ str(data_dir.joinpath('test_files', 'ct_image.dcm'))
+ )
self._person_observer_name = 'Bar^Foo'
self._observer_person_context = ObserverContext(
observer_type=codes.cid270.Person,
@@ -3182,7 +3187,8 @@ def test_construction_image(self):
measurement_report = MeasurementReport(
observation_context=self._observation_context,
procedure_reported=self._procedure_reported,
- imaging_measurements=[self._image_group]
+ imaging_measurements=[self._image_group],
+ referenced_images=[self._ref_dataset]
)
item = measurement_report[0]
assert len(item.ContentSequence) == 13
@@ -3281,7 +3287,7 @@ def test_construction_planar(self):
imaging_measurements=[self._roi_group]
)
item = measurement_report[0]
- assert len(item.ContentSequence) == 13
+ assert len(item.ContentSequence) == 12
template_item = item.ContentTemplateSequence[0]
assert template_item.TemplateIdentifier == '1500'
@@ -3303,10 +3309,8 @@ def test_construction_planar(self):
(9, '111700'),
# Procedure reported
(10, '121058'),
- # Image library
- (11, '111028'),
# Imaging measurements
- (12, '126010'),
+ (11, '126010'),
]
for index, value in content_item_expectations:
content_item = item.ContentSequence[index]
@@ -3359,7 +3363,7 @@ def test_construction_volumetric(self):
imaging_measurements=[self._roi_group_3d]
)
item = measurement_report[0]
- assert len(item.ContentSequence) == 13
+ assert len(item.ContentSequence) == 12
template_item = item.ContentTemplateSequence[0]
assert template_item.TemplateIdentifier == '1500'
@@ -3369,22 +3373,26 @@ def test_construction_volumetric(self):
(0, '121049'),
# Observer context - Person
(1, '121005'),
+ # Observer Name - Person
(2, '121008'),
# Observer context - Device
(3, '121005'),
+ # Observer UID - Device
(4, '121012'),
# Subject context - Specimen
(5, '121024'),
+ # UID, Specimen
(6, '121039'),
+ # Identifier, Specimen
(7, '121041'),
+ # Type, Specimen
(8, '371439000'),
+ # Container Identifier, Specimen
(9, '111700'),
# Procedure reported
(10, '121058'),
- # Image library
- (11, '111028'),
# Imaging measurements
- (12, '126010'),
+ (11, '126010'),
]
for index, value in content_item_expectations:
content_item = item.ContentSequence[index]
@@ -4905,12 +4913,110 @@ class TestImageLibraryEntryDescriptors(unittest.TestCase):
def setUp(self):
super().setUp()
+ file_path = Path(__file__)
+ data_dir = file_path.parent.parent.joinpath('data')
+ self._ref_ct_dataset = dcmread(
+ str(data_dir.joinpath('test_files', 'ct_image.dcm'))
+ )
+ self._ref_sm_dataset = dcmread(
+ str(data_dir.joinpath('test_files', 'sm_image.dcm'))
+ )
+ self._ref_dx_dataset = dcmread(
+ str(data_dir.joinpath('test_files', 'dx_image.dcm'))
+ )
- def test_construction(self):
- modality = codes.cid29.SlideMicroscopy
- frame_of_reference_uid = '1.2.3'
- pixel_data_rows = 10
- pixel_data_columns = 20
+ def test_ct_construction(self):
+ group = ImageLibraryEntryDescriptors(
+ image=self._ref_ct_dataset,
+ )
+ assert len(group) == 17
+ assert isinstance(group[0], CodeContentItem)
+ assert group[0].name == codes.DCM.Modality
+ assert group[0].value == codes.cid29.ComputedTomography
+ assert isinstance(group[1], UIDRefContentItem)
+ assert group[1].name == codes.DCM.FrameOfReferenceUID
+ assert group[1].value == self._ref_ct_dataset.FrameOfReferenceUID
+ assert isinstance(group[2], NumContentItem)
+ assert group[2].name == codes.DCM.PixelDataRows
+ assert group[2].value == self._ref_ct_dataset.Rows
+ assert isinstance(group[3], NumContentItem)
+ assert group[3].name == codes.DCM.PixelDataColumns
+ assert group[3].value == self._ref_ct_dataset.Columns
+ assert isinstance(group[4], NumContentItem)
+ assert group[4].name == codes.DCM.HorizontalPixelSpacing
+ assert group[4].value == self._ref_ct_dataset.PixelSpacing[0]
+ value_item = group[4].MeasuredValueSequence[0]
+ unit_code_item = value_item.MeasurementUnitsCodeSequence[0]
+ assert unit_code_item.CodeValue == 'mm'
+ assert unit_code_item.CodeMeaning == 'millimeter'
+ assert unit_code_item.CodingSchemeDesignator == 'UCUM'
+ assert isinstance(group[5], NumContentItem)
+ assert group[5].name == codes.DCM.VerticalPixelSpacing
+ assert group[5].value == self._ref_ct_dataset.PixelSpacing[1]
+ assert isinstance(group[6], NumContentItem)
+ assert group[6].name == codes.DCM.SpacingBetweenSlices
+ assert group[6].value == self._ref_ct_dataset.SpacingBetweenSlices
+ value_item = group[6].MeasuredValueSequence[0]
+ unit_code_item = value_item.MeasurementUnitsCodeSequence[0]
+ assert unit_code_item.CodeValue == 'mm'
+ assert unit_code_item.CodeMeaning == 'millimeter'
+ assert unit_code_item.CodingSchemeDesignator == 'UCUM'
+ assert isinstance(group[7], NumContentItem)
+ assert group[7].name == codes.DCM.SliceThickness
+ assert group[7].value == self._ref_ct_dataset.SliceThickness
+ assert isinstance(group[8], NumContentItem)
+ assert group[8].name == codes.DCM.ImagePositionPatientX
+ assert group[8].value == self._ref_ct_dataset.ImagePositionPatient[0]
+ assert isinstance(group[9], NumContentItem)
+ assert group[9].name == codes.DCM.ImagePositionPatientY
+ assert group[9].value == self._ref_ct_dataset.ImagePositionPatient[1]
+ assert isinstance(group[10], NumContentItem)
+ assert group[10].name == codes.DCM.ImagePositionPatientZ
+ assert group[10].value == self._ref_ct_dataset.ImagePositionPatient[2]
+ assert isinstance(group[11], NumContentItem)
+ assert group[11].name == codes.DCM.ImageOrientationPatientRowX
+ assert group[11].value == \
+ self._ref_ct_dataset.ImageOrientationPatient[0]
+ value_item = group[11].MeasuredValueSequence[0]
+ unit_code_item = value_item.MeasurementUnitsCodeSequence[0]
+ assert unit_code_item.CodeValue == '{-1:1}'
+ assert unit_code_item.CodeMeaning == '{-1:1}'
+ assert unit_code_item.CodingSchemeDesignator == 'UCUM'
+ assert isinstance(group[12], NumContentItem)
+ assert group[12].name == codes.DCM.ImageOrientationPatientRowY
+ assert group[12].value == \
+ self._ref_ct_dataset.ImageOrientationPatient[1]
+ assert isinstance(group[13], NumContentItem)
+ assert group[13].name == codes.DCM.ImageOrientationPatientRowZ
+ assert group[13].value == \
+ self._ref_ct_dataset.ImageOrientationPatient[2]
+ assert isinstance(group[14], NumContentItem)
+ assert group[14].name == codes.DCM.ImageOrientationPatientColumnX
+ assert group[14].value == \
+ self._ref_ct_dataset.ImageOrientationPatient[3]
+ assert isinstance(group[15], NumContentItem)
+ assert group[15].name == codes.DCM.ImageOrientationPatientColumnY
+ assert group[15].value == \
+ self._ref_ct_dataset.ImageOrientationPatient[4]
+ assert isinstance(group[16], NumContentItem)
+ assert group[16].name == codes.DCM.ImageOrientationPatientColumnZ
+ assert group[16].value == \
+ self._ref_ct_dataset.ImageOrientationPatient[5]
+ value_item = group[16].MeasuredValueSequence[0]
+ unit_code_item = value_item.MeasurementUnitsCodeSequence[0]
+ assert unit_code_item.CodeValue == '{-1:1}'
+ assert unit_code_item.CodeMeaning == '{-1:1}'
+ assert unit_code_item.CodingSchemeDesignator == 'UCUM'
+
+ def test_bad_ct_construction(self):
+        # Test that ImageLibraryEntryDescriptors construction fails for a
+        # 'bad' image that is missing a required attribute
+ del self._ref_ct_dataset.Rows
+ with pytest.raises(AttributeError):
+ ImageLibraryEntryDescriptors(
+ image=self._ref_ct_dataset,
+ )
+
+ def test_sm_construction(self):
content_date = datetime.now().date()
content_time = datetime.now().time()
content_date_item = DateContentItem(
@@ -4924,25 +5030,22 @@ def test_construction(self):
relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT
)
group = ImageLibraryEntryDescriptors(
- modality=modality,
- frame_of_reference_uid=frame_of_reference_uid,
- pixel_data_rows=pixel_data_rows,
- pixel_data_columns=pixel_data_columns,
+ image=self._ref_sm_dataset,
additional_descriptors=[content_date_item, content_time_item]
)
assert len(group) == 6
assert isinstance(group[0], CodeContentItem)
assert group[0].name == codes.DCM.Modality
- assert group[0].value == modality
+ assert group[0].value == codes.cid29.SlideMicroscopy
assert isinstance(group[1], UIDRefContentItem)
assert group[1].name == codes.DCM.FrameOfReferenceUID
- assert group[1].value == frame_of_reference_uid
+ assert group[1].value == self._ref_sm_dataset.FrameOfReferenceUID
assert isinstance(group[2], NumContentItem)
assert group[2].name == codes.DCM.PixelDataRows
- assert group[2].value == pixel_data_rows
+ assert group[2].value == self._ref_sm_dataset.Rows
assert isinstance(group[3], NumContentItem)
assert group[3].name == codes.DCM.PixelDataColumns
- assert group[3].value == pixel_data_columns
+ assert group[3].value == self._ref_sm_dataset.Columns
assert isinstance(group[4], DateContentItem)
assert group[4].name == codes.DCM.ContentDate
assert group[4].value == content_date
@@ -4950,6 +5053,55 @@ def test_construction(self):
assert group[5].name == codes.DCM.ContentTime
assert group[5].value == content_time
+ def test_dx_construction(self):
+ content_date = datetime.now().date()
+ content_time = datetime.now().time()
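+        # Note: ImagerPixelSpacing is ordered (row spacing, column spacing),
+        # i.e. (vertical, horizontal)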
+ imager_pixel_spacing = self._ref_dx_dataset.ImagerPixelSpacing
+ patient_orientation = self._ref_dx_dataset.PatientOrientation
+
+ content_date_item = DateContentItem(
+ name=codes.DCM.ContentDate,
+ value=content_date,
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT
+ )
+ content_time_item = TimeContentItem(
+ name=codes.DCM.ContentTime,
+ value=content_time,
+ relationship_type=RelationshipTypeValues.HAS_ACQ_CONTEXT
+ )
+ group = ImageLibraryEntryDescriptors(
+ image=self._ref_dx_dataset,
+ additional_descriptors=[content_date_item, content_time_item]
+ )
+ assert len(group) == 9
+ assert isinstance(group[0], CodeContentItem)
+ assert group[0].name == codes.DCM.Modality
+ assert group[0].value == codes.cid29.DigitalRadiography
+ assert isinstance(group[1], NumContentItem)
+ assert group[1].name == codes.DCM.PixelDataRows
+ assert group[1].value == self._ref_dx_dataset.Rows
+ assert isinstance(group[2], NumContentItem)
+ assert group[2].name == codes.DCM.PixelDataColumns
+ assert group[2].value == self._ref_dx_dataset.Columns
+ assert isinstance(group[3], TextContentItem)
+ assert group[3].name == codes.DCM.PatientOrientationRow
+ assert group[3].value == patient_orientation[0]
+ assert isinstance(group[4], TextContentItem)
+ assert group[4].name == codes.DCM.PatientOrientationColumn
+ assert group[4].value == patient_orientation[1]
+ assert isinstance(group[5], NumContentItem)
+ assert group[5].name == codes.DCM.HorizontalPixelSpacing
+ assert group[5].value == imager_pixel_spacing[1]
+ assert isinstance(group[6], NumContentItem)
+ assert group[6].name == codes.DCM.VerticalPixelSpacing
+ assert group[6].value == imager_pixel_spacing[0]
+ assert isinstance(group[7], DateContentItem)
+ assert group[7].name == codes.DCM.ContentDate
+ assert group[7].value == content_date
+ assert isinstance(group[8], TimeContentItem)
+ assert group[8].name == codes.DCM.ContentTime
+ assert group[8].value == content_time
+
class TestImageLibrary(unittest.TestCase):
@@ -4957,18 +5109,24 @@ def setUp(self):
super().setUp()
def test_construction(self):
- modality = codes.cid29.SlideMicroscopy
- frame_of_reference_uid = '1.2.3'
- pixel_data_rows = 10
- pixel_data_columns = 20
- descriptor_items = ImageLibraryEntryDescriptors(
- modality=modality,
- frame_of_reference_uid=frame_of_reference_uid,
- pixel_data_rows=pixel_data_rows,
- pixel_data_columns=pixel_data_columns,
+ file_path = Path(__file__)
+ data_dir = file_path.parent.parent.joinpath('data')
+ self._ref_sm_dataset = dcmread(
+ str(data_dir.joinpath('test_files', 'sm_image.dcm'))
)
- library_items = ImageLibrary(groups=[descriptor_items])
+
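+        # Each referenced dataset should produce one image library group
+        # containing a single image reference content item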
+ library_items = ImageLibrary([self._ref_sm_dataset])
assert len(library_items) == 1
library_group_item = library_items[0].ContentSequence[0]
- assert len(library_group_item.ContentSequence) == len(descriptor_items)
+ assert len(library_group_item.ContentSequence) == 1
assert library_group_item.name == codes.DCM.ImageLibraryGroup
+ content_item = library_group_item.ContentSequence[0]
+ assert isinstance(content_item, ImageContentItem)
+ ref_sop_instance_uid = \
+ content_item.ReferencedSOPSequence[0].ReferencedSOPInstanceUID
+ ref_sop_class_uid = \
+ content_item.ReferencedSOPSequence[0].ReferencedSOPClassUID
+ assert ref_sop_instance_uid == \
+ self._ref_sm_dataset.SOPInstanceUID
+ assert ref_sop_class_uid == \
+ self._ref_sm_dataset.SOPClassUID