Skip to content

Commit

Permalink
Merge pull request #639 from HEXRD/eiger-stream-v1
Browse files Browse the repository at this point in the history
Add eiger-stream-v1 imageseries format
  • Loading branch information
psavery authored May 17, 2024
2 parents 30e7f0d + edca8b0 commit 9c500f8
Show file tree
Hide file tree
Showing 5 changed files with 237 additions and 83 deletions.
2 changes: 1 addition & 1 deletion conda.recipe/conda_build_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,5 +9,5 @@ c_compiler:
target_platform:
- linux-64 # [linux]
- osx-64 # [osx]
- osx-arm64 # [osx]
# - osx-arm64 # [osx]
- win-64 # [win]
14 changes: 13 additions & 1 deletion hexrd/imageseries/load/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,19 @@ class ImageSeriesAdapter(ImageSeriesABC, metaclass=_RegisterAdapterClass):

# import all adapter modules

from . import array, framecache, hdf5, imagefiles, rawimage, metadata, trivial
from . import (
array, framecache, hdf5, imagefiles, rawimage, metadata, trivial
)

try:
from dectris.compression import decompress
except ImportError:
# Dectris compression is not available. Skip the eiger_stream_v1
pass
else:
# Eiger stream v1 is supported
from . import eiger_stream_v1


#for loader, name, ispkg in pkgutil.iter_modules(__path__):
# if name is not 'registry':
Expand Down
138 changes: 138 additions & 0 deletions hexrd/imageseries/load/eiger_stream_v1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
"""HDF5 adapter class
"""
import warnings

from dectris.compression import decompress
import h5py
import numpy as np

from hexrd.utils.hdf5 import unwrap_h5_to_dict

from . import ImageSeriesAdapter
from ..imageseriesiter import ImageSeriesIterator


class EigerStreamV1ImageSeriesAdapter(ImageSeriesAdapter):
    """ImageSeries adapter for HDF5 files in the 'eiger-stream-v1' layout.

    Each frame is stored as a separate entry under the '/data' group,
    keyed by the stringified frame index, and is decompressed on access
    via ``_decompress_frame``. Series-level metadata lives under
    '/metadata' and is loaded once on open.
    """

    format = 'eiger-stream-v1'

    def __init__(self, fname, **kwargs):
        """Open the image series.

        Parameters
        ----------
        fname : str or h5py.File
            Path to the HDF5 file, or an already-open ``h5py.File``
            (in which case this adapter takes over the handle and will
            close it).
        **kwargs : dict
            Unused; accepted for interface compatibility with other
            imageseries adapters.
        """
        if isinstance(fname, h5py.File):
            # An already-open file was passed in; adopt the handle
            self.__h5name = fname.filename
            self.__h5file = fname
        else:
            self.__h5name = fname
            self.__h5file = h5py.File(self.__h5name, 'r')

        self.__data_group_path = '/data'
        self._load_metadata()

    def close(self):
        """Close the underlying HDF5 file (safe to call more than once)."""
        if self.__h5file is not None:
            self.__h5file.close()
            self.__h5file = None

    def __del__(self):
        # !!! Note this is not ideal, as the use of __del__ is problematic.
        #     However, it is highly unlikely that the usage of a ImageSeries
        #     would pose a problem. A warning will (hopefully) be emitted if
        #     an issue arises at some point
        try:
            self.close()
        except Exception:
            msg = "EigerStreamV1ImageSeriesAdapter could not close h5 file"
            warnings.warn(msg)

    def __getitem__(self, key):
        """Return frame ``key`` (int), or ``frame[rest]`` for a tuple key.

        An integer key returns the whole decompressed frame. A tuple key
        ``(i, *rest)`` returns frame ``i`` indexed by ``rest``.
        """
        if isinstance(key, int):
            idx = key
            rest = None
        else:
            idx = key[0]
            rest = key[1:]

        # Frames are keyed by stringified index in the data group
        entry = self._data_group[str(idx)]
        d = {}
        unwrap_h5_to_dict(entry, d)
        data = _decompress_frame(d)
        if rest is None:
            return data
        else:
            return data[rest]

    def __iter__(self):
        return ImageSeriesIterator(self)

    def __len__(self):
        # One group entry per frame
        return len(self._data_group)

    def __getstate__(self):
        # Remove any non-pickleable attributes
        to_remove = [
            '__h5file',
        ]

        # NOTE: private-name mangling uses the *defining* class's name, so
        # build the prefix from this class explicitly. Using
        # self.__class__.__name__ would produce the wrong prefix for a
        # subclass and silently fail to drop the file handle.
        prefix = f'_{EigerStreamV1ImageSeriesAdapter.__name__}'
        to_remove = [f'{prefix}{x}' for x in to_remove]

        # Make a copy of the dict to modify
        state = self.__dict__.copy()

        # Remove them; tolerate attributes that were never set (e.g. on a
        # partially-initialized instance)
        for attr in to_remove:
            state.pop(attr, None)

        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Re-open the file handle that __getstate__() dropped
        self.__h5file = h5py.File(self.__h5name, 'r')
        self._load_metadata()

    def _load_metadata(self):
        self.__meta = self._get_metadata()

    def _get_metadata(self):
        # Read the whole '/metadata' group into a plain dict
        d = {}
        unwrap_h5_to_dict(self.__h5file['/metadata'], d)
        return d

    @property
    def metadata(self):
        """(read-only) Image sequence metadata
        note: metadata loaded on open and allowed to be modified
        """
        return self.__meta

    @property
    def _data_group(self):
        return self.__h5file[self.__data_group_path]

    @property
    def _first_data_entry(self):
        # Frame 0 is used as the template for series-wide dtype/shape
        return self._data_group['0']

    @property
    def dtype(self):
        # NOTE(review): h5py scalar reads may yield bytes rather than a
        # numpy dtype object here — confirm downstream consumers handle it
        return self._first_data_entry['dtype'][()]

    @property
    def shape(self):
        return tuple(self._first_data_entry['shape'][()])


def _decompress_frame(d: dict) -> np.ndarray:
compression_type = d['compression_type']
dtype = d['dtype']
shape = d['shape']
data = d['data']
elem_size = d['elem_size']

if compression_type is None:
return np.frombuffer(data, dtype=dtype).reshape(shape)

decompressed_bytes = decompress(data, compression_type,
elem_size=elem_size)
return np.frombuffer(decompressed_bytes, dtype=dtype).reshape(shape)
82 changes: 1 addition & 81 deletions hexrd/instrument/hedm_instrument.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@
from hexrd import distortion as distortion_pkg
from hexrd.utils.compatibility import h5py_read_string
from hexrd.utils.concurrent import distribute_tasks
from hexrd.utils.hdf5 import unwrap_dict_to_h5, unwrap_h5_to_dict
from hexrd.utils.yaml import NumpyToNativeDumper
from hexrd.valunits import valWUnit
from hexrd.wppf import LeBail
Expand Down Expand Up @@ -2355,87 +2356,6 @@ def dump_patch(self, panel_id,
return


def unwrap_dict_to_h5(grp, d, asattr=False):
    """
    Unwraps a dictionary to an HDF5 file of the same structure.

    NOTE: the input dict ``d`` is destructively emptied via ``popitem``
    as its entries are written out.

    Parameters
    ----------
    grp : HDF5 group object
        The HDF5 group to recursively unwrap the dict into.
    d : dict
        Input dict (of dicts). Emptied in place.
    asattr : bool, optional
        Flag to write end members in the dictionary tree to attributes. If
        False, it writes the object to a dataset using numpy. The default
        is False.

    Returns
    -------
    None.
    """
    while len(d) > 0:
        key, item = d.popitem()
        if isinstance(item, dict):
            # Recurse: sub-dicts become HDF5 sub-groups
            subgrp = grp.create_group(key)
            unwrap_dict_to_h5(subgrp, item, asattr=asattr)
        else:
            if asattr:
                try:
                    grp.attrs.create(key, item)
                except TypeError:
                    # None cannot be stored as an attribute; skip it
                    if item is None:
                        continue
                    else:
                        raise
            else:
                try:
                    grp.create_dataset(key, data=np.atleast_1d(item))
                except TypeError:
                    # None cannot be stored; skip it
                    if item is None:
                        continue
                    else:
                        # probably a string badness
                        grp.create_dataset(key, data=item)


def unwrap_h5_to_dict(f, d):
    """
    Unwraps a simple HDF5 file to a dictionary of the same structure.

    Parameters
    ----------
    f : HDF5 file (mode r)
        The input HDF5 file object (or group).
    d : dict
        dictionary object to update in place.

    Returns
    -------
    None.

    Notes
    -----
    As written, ignores attributes and uses numpy to cast HDF5 datasets to
    dict entries. Checks for 'O' type arrays and casts to strings; also
    converts single-element arrays to scalars.
    """
    for key, val in f.items():
        try:
            # EAFP: assume ``val`` is a group and recurse; datasets lack
            # the attribute the recursion touches and raise AttributeError
            d[key] = {}
            unwrap_h5_to_dict(val, d[key])
        except AttributeError:
            # reached a dataset
            # NOTE(review): an AttributeError raised deeper inside a
            # nested call would also land here and re-read ``val`` as a
            # dataset — confirm this cannot occur in practice
            if np.dtype(val) == 'O':
                # Object dtype: read as (decoded) string
                d[key] = h5py_read_string(val)
            else:
                tmp = np.array(val)
                if tmp.ndim == 1 and len(tmp) == 1:
                    # Collapse single-element 1-D arrays to scalars
                    d[key] = tmp[0]
                else:
                    d[key] = tmp


class GenerateEtaOmeMaps(object):
"""
eta-ome map class derived from new image_series and YAML config
Expand Down
84 changes: 84 additions & 0 deletions hexrd/utils/hdf5.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
from hexrd.utils.compatibility import h5py_read_string

import numpy as np


def unwrap_dict_to_h5(grp, d, asattr=False):
    """
    Unwraps a dictionary to an HDF5 file of the same structure.

    NOTE: the input dict ``d`` is destructively emptied via ``popitem``
    as its entries are written out.

    Parameters
    ----------
    grp : HDF5 group object
        The HDF5 group to recursively unwrap the dict into.
    d : dict
        Input dict (of dicts). Emptied in place.
    asattr : bool, optional
        Flag to write end members in the dictionary tree to attributes. If
        False, it writes the object to a dataset using numpy. The default
        is False.

    Returns
    -------
    None.
    """
    while len(d) > 0:
        key, item = d.popitem()
        if isinstance(item, dict):
            # Recurse: sub-dicts become HDF5 sub-groups
            subgrp = grp.create_group(key)
            unwrap_dict_to_h5(subgrp, item, asattr=asattr)
        else:
            if asattr:
                try:
                    grp.attrs.create(key, item)
                except TypeError:
                    # None cannot be stored as an attribute; skip it
                    if item is None:
                        continue
                    else:
                        raise
            else:
                try:
                    grp.create_dataset(key, data=np.atleast_1d(item))
                except TypeError:
                    # None cannot be stored; skip it
                    if item is None:
                        continue
                    else:
                        # probably a string badness
                        grp.create_dataset(key, data=item)


def unwrap_h5_to_dict(f, d):
    """
    Unwraps a simple HDF5 file to a dictionary of the same structure.

    Parameters
    ----------
    f : HDF5 file (mode r)
        The input HDF5 file object (or group).
    d : dict
        dictionary object to update in place.

    Returns
    -------
    None.

    Notes
    -----
    As written, ignores attributes and uses numpy to cast HDF5 datasets to
    dict entries. Checks for 'O' type arrays and casts to strings; also
    converts single-element arrays to scalars.
    """
    for key, val in f.items():
        try:
            # EAFP: assume ``val`` is a group and recurse; datasets lack
            # the attribute the recursion touches and raise AttributeError
            d[key] = {}
            unwrap_h5_to_dict(val, d[key])
        except AttributeError:
            # reached a dataset
            # NOTE(review): an AttributeError raised deeper inside a
            # nested call would also land here and re-read ``val`` as a
            # dataset — confirm this cannot occur in practice
            if np.dtype(val) == 'O':
                # Object dtype: read as (decoded) string
                d[key] = h5py_read_string(val)
            else:
                tmp = np.array(val)
                if tmp.ndim == 1 and len(tmp) == 1:
                    # Collapse single-element 1-D arrays to scalars
                    d[key] = tmp[0]
                else:
                    d[key] = tmp

0 comments on commit 9c500f8

Please sign in to comment.