diff --git a/OmeSliCC/OmeZarr.py b/OmeSliCC/OmeZarr.py
index d4649ee..b37c8d2 100644
--- a/OmeSliCC/OmeZarr.py
+++ b/OmeSliCC/OmeZarr.py
@@ -13,8 +13,10 @@ class OmeZarr:
     def __init__(self, filename):
         self.filename = filename
 
-    def write(self, data, source, tile_size=[],
-              npyramid_add=0, pyramid_downsample=2, compression=[]):
+    def write(self, sources, tile_size=[], compression=[],
+              npyramid_add=0, pyramid_downsample=2, translations=[],
+              image_operations=[]):
+
         compressor, compression_filters = create_compression_filter(compression)
         storage_options = {'dimension_separator': '/', 'chunks': tile_size}
         if compressor is not None:
@@ -22,8 +24,44 @@ def write(self, data, source, tile_size=[],
         if compression_filters is not None:
             storage_options['filters'] = compression_filters
 
-        zarr_root = zarr.group(parse_url(self.filename, mode="w").store, overwrite=True)
+        zarr_root = zarr.open_group(parse_url(self.filename, mode="w").store,
+                                    mode="w", storage_options=storage_options)
+        root_path = ''
+
+        multiple_images = isinstance(sources, list)
+        multi_metadata = []
+        if not multiple_images:
+            sources = [sources]
+
+        for index, source in enumerate(sources):
+            data = source.asarray()
+            for image_operation in image_operations:
+                data = image_operation(data)
+            if multiple_images:
+                root_path = str(index)
+                root = zarr_root.create_group(root_path)
+            else:
+                root = zarr_root
+            if index < len(translations):
+                translation = translations[index]
+            else:
+                translation = []
+            self.write_dataset(root, data, source, npyramid_add, pyramid_downsample, translation)
+            if multiple_images:
+                meta = root.attrs['multiscales'][0].copy()
+                for dataset_meta in meta['datasets']:
+                    dataset_meta['path'] = f'{root_path}/{dataset_meta["path"]}'
+                multi_metadata.append(meta)
+        if multiple_images:
+            zarr_root.attrs['multiscales'] = multi_metadata
+        zarr_root.attrs['omero'] = create_channel_metadata(sources[0])
+
+    def write_dataset(self, zarr_root, data, source,
+                      npyramid_add=0, pyramid_downsample=2, translation=[]):
+        pixel_size_um = source.get_pixel_size_micrometer()
+        if len(pixel_size_um) == 0:
+            npyramid_add = 0
 
         dimension_order = source.get_dimension_order()
         if 'c' in dimension_order and dimension_order.index('c') == len(dimension_order) - 1:
@@ -36,11 +74,9 @@ def write(self, data, source, tile_size=[],
         pixel_size_scales = []
         scale = 1
         for i in range(npyramid_add + 1):
-            pixel_size_scales.append(create_transformation_metadata(dimension_order, pixel_size_um, scale))
-            scale /= pyramid_downsample
+            pixel_size_scales.append(create_transformation_metadata(dimension_order, pixel_size_um, scale, translation))
+            if pyramid_downsample:
+                scale /= pyramid_downsample
 
         write_image(image=data, group=zarr_root, axes=axes, coordinate_transformations=pixel_size_scales,
-                    scaler=Scaler(downscale=pyramid_downsample, max_layer=npyramid_add),
-                    storage_options=storage_options)
-
-        zarr_root.attrs['omero'] = create_channel_metadata(source)
+                    scaler=Scaler(downscale=pyramid_downsample, max_layer=npyramid_add), overwrite=True)
diff --git a/OmeSliCC/conversion.py b/OmeSliCC/conversion.py
index 6f2a4fc..472bca4 100644
--- a/OmeSliCC/conversion.py
+++ b/OmeSliCC/conversion.py
@@ -168,6 +168,28 @@ def combine_images(sources: list[OmeSource], params: dict):
     save_image(image, output_filename, output_params)
 
 
+def store_tiles(sources: list[OmeSource], output_filename: str, params: dict,
+                composition_metadata: list = [], image_operations: list = []):
+    output_params = params['output']
+    tile_size = output_params.get('tile_size')
+    compression = output_params.get('compression')
+    npyramid_add = output_params.get('npyramid_add', 0)
+    pyramid_downsample = output_params.get('pyramid_downsample')
+
+    translations = []
+    pixel_size = sources[0].get_pixel_size_micrometer()
+    for meta in composition_metadata:
+        bounds = meta['Bounds']
+        translation = bounds['StartX'], bounds['StartY']
+        translation_um = np.multiply(translation, pixel_size[:2])
+        translations.append(translation_um)
+
+    zarr = OmeZarr(output_filename)
+    zarr.write(sources, tile_size=tile_size, compression=compression,
+               npyramid_add=npyramid_add, pyramid_downsample=pyramid_downsample,
+               translations=translations, image_operations=image_operations)
+
+
 def get_source_image(source: OmeSource):
     image = source.asarray()
     image_size = image.size * image.itemsize
@@ -202,8 +224,8 @@ def save_image_as_ome_zarr(source: OmeSource, data: np.ndarray, output_filename:
     pyramid_downsample = output_params.get('pyramid_downsample')
 
     zarr = OmeZarr(output_filename)
-    zarr.write(data, source, tile_size=tile_size, npyramid_add=npyramid_add, pyramid_downsample=pyramid_downsample,
-               compression=compression)
+    zarr.write(source, tile_size=tile_size, compression=compression,
+               npyramid_add=npyramid_add, pyramid_downsample=pyramid_downsample)
 
 
 def save_image_as_zarr(source: OmeSource, data: np.ndarray, output_filename: str, output_params: dict):
diff --git a/OmeSliCC/image_util.py b/OmeSliCC/image_util.py
index 2b66d0f..db6b723 100644
--- a/OmeSliCC/image_util.py
+++ b/OmeSliCC/image_util.py
@@ -8,6 +8,7 @@ import PIL.Image
 from PIL.ExifTags import TAGS
 import tifffile
+from scipy.ndimage import gaussian_filter
 from skimage.transform import downscale_local_mean
 from tifffile import TiffFile
 
 
@@ -429,6 +430,37 @@ def calc_fraction_used(image: np.ndarray, threshold: float = 0.1) -> float:
     return fraction
 
 
+def blur_image_single(image, sigma):
+    return gaussian_filter(image, sigma)
+
+
+def blur_image(image, sigma):
+    nchannels = image.shape[2] if image.ndim == 3 else 1
+    if nchannels not in [1, 3]:
+        new_image = np.zeros_like(image)
+        for channeli in range(nchannels):
+            new_image[..., channeli] = blur_image_single(image[..., channeli], sigma)
+    else:
+        new_image = blur_image_single(image, sigma)
+    return new_image
+
+
+def calc_tiles_median(images):
+    out_image = np.zeros_like(images[0])
+    median_image = np.median(images, 0, out_image)
+    return median_image
+
+
+def calc_tiles_quantiles(images, quantiles):
+    out_quantiles = []
+    quantile_images = np.quantile(images, quantiles, 0)
+    for quantile_image in quantile_images:
+        maxval = 2 ** (8 * images[0].dtype.itemsize) - 1
+        image = (quantile_image / maxval).astype(np.float32)
+        out_quantiles.append(image)
+    return out_quantiles
+
+
 def save_image(image: np.ndarray, filename: str, output_params: dict = {}):
     compression = output_params.get('compression')
     PIL.Image.fromarray(image).save(filename, compression=compression)
diff --git a/OmeSliCC/ome_zarr_util.py b/OmeSliCC/ome_zarr_util.py
index 26be4f2..8c28453 100644
--- a/OmeSliCC/ome_zarr_util.py
+++ b/OmeSliCC/ome_zarr_util.py
@@ -20,8 +20,10 @@ def create_axes_metadata(dimension_order):
     return axes
 
 
-def create_transformation_metadata(dimension_order, pixel_size_um, scale):
+def create_transformation_metadata(dimension_order, pixel_size_um, scale, translation=[]):
+    metadata = []
     pixel_size_scale = []
+    translation_scale = []
     for dimension in dimension_order:
         if dimension == 'z' and len(pixel_size_um) > 2:
             pixel_size_scale1 = pixel_size_um[2]
@@ -34,7 +36,21 @@ def create_transformation_metadata(dimension_order, pixel_size_um, scale):
         if pixel_size_scale1 == 0:
             pixel_size_scale1 = 1
         pixel_size_scale.append(pixel_size_scale1)
-    return [{'scale': pixel_size_scale, 'type': 'scale'}]
+
+        if dimension == 'z' and len(translation) > 2:
+            translation1 = translation[2]
+        elif dimension == 'y' and len(translation) > 1:
+            translation1 = translation[1] / scale
+        elif dimension == 'x' and len(translation) > 0:
+            translation1 = translation[0] / scale
+        else:
+            translation1 = 0
+        translation_scale.append(translation1)
+
+    metadata.append({'type': 'scale', 'scale': pixel_size_scale})
+    if not all(v == 0 for v in translation_scale):
+        metadata.append({'type': 'translation', 'translation': translation_scale})
+    return metadata
 
 
@@ -50,7 +66,10 @@ def create_channel_metadata(source):
     for channeli, channel0 in enumerate(channels):
         channel = channel0.copy()
         if 'color' in channel:
-            channel['color'] = rgba_to_hexrgb(channel['color'])
+            color = rgba_to_hexrgb(channel['color'])
+        else:
+            color = ''
+        channel['color'] = color
         if 'window' not in channel:
             channel['window'] = source.get_channel_window(channeli)
         omezarr_channels.append(channel)
diff --git a/tests/stitching.py b/tests/stitching.py
new file mode 100644
index 0000000..eeebd9e
--- /dev/null
+++ b/tests/stitching.py
@@ -0,0 +1,117 @@
+import glob
+import numpy as np
+import os
+
+import tifffile
+from tifffile import xml2dict
+from tqdm import tqdm
+
+from OmeSliCC.conversion import store_tiles
+from OmeSliCC.TiffSource import TiffSource
+from OmeSliCC.image_util import *
+
+
+def get_normalisation_images(composition_metadata, quantiles, scale=1.0):
+    images = []
+    for quantile in quantiles:
+        filename = os.path.join(os.path.dirname(output_filename), f'tile_quantile_{quantile}.tiff')
+        if os.path.exists(filename):
+            images.append(tifffile.imread(filename))
+    if len(images) < len(quantiles):
+        images = create_normalisation_images(composition_metadata, quantiles, scale)
+        for image, quantile in zip(images, quantiles):
+            filename = os.path.join(os.path.dirname(output_filename), f'tile_quantile_{quantile}.tiff')
+            tifffile.imwrite(filename, image, compression='LZW')
+            image = blur_image(image, 10)
+            filename = os.path.join(os.path.dirname(output_filename), f'tile_quantile_{quantile}_smooth.tiff')
+            tifffile.imwrite(filename, image, compression='LZW')
+    return images
+
+
+def create_normalisation_images(composition_metadata, quantiles, scale=1.0):
+    source0 = TiffSource(tile_filenames[0])
+    size0 = source0.get_size()
+    new_size = tuple(np.round(np.multiply(size0, scale)).astype(int))
+    nchannels = len(source0.get_channels())
+    channel_images = []
+    for channeli in range(nchannels):
+        # filter edge tiles
+        positions = [(metadata['Bounds']['StartX'], metadata['Bounds']['StartY']) for metadata in composition_metadata]
+        edges = np.array((np.min(positions, 0), np.max(positions, 0)))
+        x_edges, y_edges = edges[:, 0], edges[:, 1]
+        filtered_tiles = []
+        for metadata in composition_metadata:
+            if metadata['Bounds']['StartX'] not in x_edges and metadata['Bounds']['StartY'] not in y_edges:
+                filename0 = metadata['Filename']
+                for tile_filename in tile_filenames:
+                    if filename0 in tile_filename:
+                        filtered_tiles.append(tile_filename)
+
+        images = []
+        print('Loading tiles')
+        for tile_filename in tqdm(filtered_tiles):
+            image = TiffSource(tile_filename).asarray()[..., channeli]
+            if scale != 1:
+                image = image_resize(image, new_size)
+            images.append(image)
+
+        # filter non-empty tiles
+        median_image = calc_tiles_median(images)
+        print('Filtering tiles with signal')
+        difs = [np.mean(np.abs(image.astype(np.float32) - median_image.astype(np.float32)), (0, 1)) for image in images]
+        threshold = np.mean(difs, 0)
+        images = [image for image, dif in zip(images, difs) if np.all(dif < threshold)]
+
+        norm_images0 = calc_tiles_quantiles(images, quantiles)
+        norm_images = []
+        for image in norm_images0:
+            if scale != 1:
+                image = image_resize(image, size0, preserve_range=True)
+            norm_images.append(image)
+        channel_images.append(norm_images)
+
+    quantile_images = []
+    for quantilei in range(len(quantiles)):
+        quantile_image = None
+        for channel_image in channel_images:
+            image = channel_image[quantilei]
+            if quantile_image is None:
+                quantile_image = image
+            else:
+                quantile_image = cv.merge(list(cv.split(quantile_image)) + [image])
+        quantile_images.append(quantile_image)
+    return quantile_images
+
+
+def flatfield_correction(image0, dark=0, bright=1, clip=True):
+    # https://imagej.net/plugins/bigstitcher/flatfield-correction
+    mean_bright_dark = np.mean(bright - dark, (0, 1))
+    image = (image0 - dark) * mean_bright_dark / (bright - dark)
+    if clip:
+        image = np.clip(image, 0, 1)
+    return image
+
+
+def do_flatfield_correction(image):
+    dtype = image.dtype
+    image = float2int_image(flatfield_correction(int2float_image(image), dark), dtype)
+    return image
+
+
+if __name__ == '__main__':
+    tile_path = 'D:/slides/EM04573_01t/Multichannel tiles/*.tif*'
+    tile_filenames = glob.glob(tile_path)
+    metadata_filename = 'D:/slides/EM04573_01t/EM04573_01_20x_beads-07_info_ome_tiff.xml'
+    composition_metadata = xml2dict(open(metadata_filename, encoding='utf8').read())['ExportDocument']['Image']
+
+    params = {'output': {}}
+    output_filename = 'D:/slides/tiles.ome.zarr'
+
+    flatfield_quantiles = [0.05]
+    flatfield_scale = 1.0
+    norm_images = get_normalisation_images(composition_metadata, flatfield_quantiles, scale=flatfield_scale)
+    dark = norm_images[0]
+
+    sources = [TiffSource(tile_filename) for tile_filename in tqdm(tile_filenames)]
+
+    store_tiles(sources, output_filename, params, composition_metadata, image_operations=[do_flatfield_correction])
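Usage sketch (illustrative only, not part of the patch): calling the new multi-source OmeZarr.write added above directly, with hypothetical file names and parameter values; the keyword arguments follow the signature introduced in this diff.

    # Minimal sketch, assuming two example tiles and per-tile (x, y) offsets in micrometers.
    from OmeSliCC.OmeZarr import OmeZarr
    from OmeSliCC.TiffSource import TiffSource

    sources = [TiffSource('tile_0.ome.tif'), TiffSource('tile_1.ome.tif')]  # hypothetical filenames
    translations_um = [(0.0, 0.0), (102.4, 0.0)]  # hypothetical per-tile offsets
    zarr = OmeZarr('tiles.ome.zarr')
    zarr.write(sources, tile_size=[1024, 1024], npyramid_add=2, pyramid_downsample=2,
               translations=translations_um)
    # Each source is written to its own group ('0', '1', ...) and the root 'multiscales'
    # attribute combines the per-group dataset paths, as implemented in OmeZarr.write above.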