Merge pull request #16 from georgeaidinis/main
Solution to weird artifacts created by DLICV
ashishsingh18 authored Sep 14, 2023
2 parents a2f82ed + 063a018 commit 478d589
Showing 3 changed files with 104 additions and 7 deletions.
29 changes: 28 additions & 1 deletion niCHARTPipelines/CalculateROIVolume.py
@@ -24,7 +24,7 @@ def calc_roi_volumes(mrid, in_img_file, label_indices = []):

    ## Get label indices
    if label_indices.shape[0] == 0:
        logger.warning('Label indices not provided, generating from data')
        # logger.warning('Label indices not provided, generating from data')
        label_indices = u_ind

    label_names = label_indices.astype(str)
@@ -99,3 +99,30 @@ def create_roi_csv(scan_id, in_roi, list_single_roi, map_derived_roi, out_img, o

    ## Write out csv
    df_dmuse.to_csv(out_csv, index = False)

###---------calculate ROI volumes-----------
def extract_roi_masks(in_roi, map_derived_roi, out_pref):
    '''Create individual roi masks for single and derived rois
    '''
    img_ext_type = '.nii.gz'

    ## Read image
    in_nii = nib.load(in_roi)
    img_mat = in_nii.get_fdata().astype(int)

    ## Read derived roi map file to a dictionary
    roi_dict = {}
    with open(map_derived_roi) as roi_map:
        reader = csv.reader(roi_map, delimiter=',')
        for row in reader:
            key = str(row[0])
            val = [int(x) for x in row[2:]]
            roi_dict[key] = val

    # Create an individual roi mask for each roi
    for i, key in enumerate(roi_dict):
        print(i)
        key_vals = roi_dict[key]
        tmp_mask = np.isin(img_mat, key_vals).astype(int)
        out_nii = nib.Nifti1Image(tmp_mask, in_nii.affine, in_nii.header)
        nib.save(out_nii, out_pref + '_' + str(key) + img_ext_type)
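
For reference, a minimal usage sketch of the new extract_roi_masks helper. The file paths are placeholders, and the map-file layout (an ROI identifier in the first column, integer label indices from the third column onward) is inferred from the code above rather than from repository fixtures:

from niCHARTPipelines.CalculateROIVolume import extract_roi_masks

## Writes one binary mask per ROI listed in the map file, e.g. out/scan1_<roi>.nii.gz,
## by flagging every voxel whose label in the input image appears in that ROI's index list
extract_roi_masks(
    in_roi='scan1_DLMUSE.nii.gz',        ## labeled ROI image (placeholder path)
    map_derived_roi='MUSE_mapping.csv',  ## derived roi map: identifier, description, indices...
    out_pref='out/scan1')                ## output prefix for the per-roi masks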
64 changes: 60 additions & 4 deletions niCHARTPipelines/CombineMasks.py
@@ -1,8 +1,53 @@
import nibabel as nib
import numpy as np
from nibabel.orientations import axcodes2ornt, inv_ornt_aff, ornt_transform
from nipype.interfaces.image import Reorient
from scipy import ndimage
from scipy.ndimage.measurements import label


## Find bounding box for the foreground values in img, with a given padding percentage
def calc_bbox_with_padding(img, perc_pad = 10):

    img = img.astype('uint8')

    ## Output is the coordinates of the bounding box
    bcoors = np.zeros([3,2], dtype=int)

    ## Find the largest connected component
    ## INFO: In images with very large FOV DLICV may have small isolated regions in
    ## boundaries; so we calculate the bounding box based on the brain, not all
    ## foreground voxels
    str_3D = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                       [[0, 1, 0], [1, 1, 1], [0, 1, 0]],
                       [[0, 0, 0], [0, 1, 0], [0, 0, 0]]], dtype='uint8')
    labeled, ncomp = label(img, str_3D)
    sizes = ndimage.sum(img, labeled, range(ncomp + 1))
    img_largest_cc = (labeled == np.argmax(sizes)).astype(int)

    ## Find coors in each axis
    for sel_axis in [0, 1, 2]:

        ## Get axes other than the selected
        other_axes = [0, 1, 2]
        other_axes.remove(sel_axis)

        ## Get img dim in selected axis
        dim = img_largest_cc.shape[sel_axis]

        ## Find bounding box (index of first and last non-zero slices)
        nonzero = np.any(img_largest_cc, axis = tuple(other_axes))
        bbox = np.where(nonzero)[0][[0, -1]]

        ## Add padding
        size_pad = int(np.round((bbox[1] - bbox[0]) * perc_pad / 100))
        b_min = int(np.max([0, bbox[0] - size_pad]))
        b_max = int(np.min([dim, bbox[1] + size_pad]))

        bcoors[sel_axis, :] = [b_min, b_max]

    return bcoors

def apply_combine(in_img_name, icv_img_name, out_img_name):
    '''Combine icv and muse masks.
    '''
@@ -12,13 +57,24 @@ def apply_combine(in_img_name, icv_img_name, out_img_name):

    img_in = nii_in.get_fdata()
    img_icv = nii_icv.get_fdata()

    ################################
    ## INFO: nnunet hallucinated on images with large FOV. To solve this problem
    ## we added pre/post processing steps to crop initial image around ICV
    ## mask before sending to DLMUSE
    ##
    ## MUSE image (img_in) may be cropped. Pad it to initial image size
    bcoors = calc_bbox_with_padding(img_icv)
    img_out = img_icv * 0
    img_out[bcoors[0,0]:bcoors[0,1], bcoors[1,0]:bcoors[1,1], bcoors[2,0]:bcoors[2,1]] = img_in
    ################################

    # Merge masks : Add a new label (1) to MUSE for foreground voxels in ICV that is not in MUSE
    # this label will mainly represent cortical CSF
    img_in[(img_in==0) & (img_icv>0)] = 1
    img_out[(img_out==0) & (img_icv>0)] = 1

    img_in = img_in.astype(int)
    img_out = img_out.astype(int)

    ## Save out image
    nii_out = nib.Nifti1Image(img_in, nii_in.affine, nii_in.header)
    nii_out.to_filename(out_img_name)
    nii_out = nib.Nifti1Image(img_out, nii_in.affine, nii_in.header)
    nii_out.to_filename(out_img_name)
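
To make the behavior of calc_bbox_with_padding concrete, here is a small self-contained sketch (the toy array is purely illustrative): the function returns the per-axis [min, max] indices of a padded bounding box around the largest connected foreground component, which apply_mask uses to crop the input and apply_combine uses to pad the cropped DLMUSE labels back to the original grid.

import numpy as np
from niCHARTPipelines.CombineMasks import calc_bbox_with_padding

## Toy "ICV mask": a 6x6x6 foreground block plus one isolated stray voxel
mask = np.zeros((20, 20, 20), dtype=np.uint8)
mask[5:11, 5:11, 5:11] = 1   ## main component (the "brain")
mask[0, 0, 0] = 1            ## isolated voxel; ignored, since only the largest component counts

bcoors = calc_bbox_with_padding(mask, perc_pad=10)
print(bcoors)   ## 3x2 array of [min, max] per axis, padded by ~10% of the extent and clipped to the image

## Cropping with the box, as apply_mask does
mask_crop = mask[bcoors[0,0]:bcoors[0,1], bcoors[1,0]:bcoors[1,1], bcoors[2,0]:bcoors[2,1]]
print(mask_crop.shape)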
18 changes: 16 additions & 2 deletions niCHARTPipelines/MaskImage.py
@@ -1,4 +1,9 @@
import numpy as np
import nibabel as nib
from scipy import ndimage
from scipy.ndimage.measurements import label

from niCHARTPipelines.CombineMasks import calc_bbox_with_padding


###---------mask image-----------
@@ -13,6 +18,15 @@ def apply_mask(in_img_name, mask_img_name, out_img_name):
    ## Mask image
    img_in[img_mask == 0] = 0

    ################################
    ## INFO: nnunet hallucinated on images with large FOV. To solve this problem
    ## we added pre/post processing steps to crop initial image around ICV
    ## mask before sending to DLMUSE
    ##
    ## Crop image
    bcoors = calc_bbox_with_padding(img_mask)
    img_in_crop = img_in[bcoors[0,0]:bcoors[0,1], bcoors[1,0]:bcoors[1,1], bcoors[2,0]:bcoors[2,1]]

    ## Save out image
    nii_out = nib.Nifti1Image(img_in, nii_in.affine, nii_in.header)
    nii_out.to_filename(out_img_name)
    nii_out = nib.Nifti1Image(img_in_crop, nii_in.affine, nii_in.header)
    nii_out.to_filename(out_img_name)
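
Taken together, the changed functions implement the pre/post-processing described in the INFO comments: crop the input around the DLICV mask before segmentation, then pad the result back afterwards. A rough end-to-end sketch follows; run_dlmuse and all file names are placeholders, and only apply_mask and apply_combine come from the files changed in this commit.

from niCHARTPipelines.MaskImage import apply_mask
from niCHARTPipelines.CombineMasks import apply_combine

## 1) Pre-processing: mask the T1 with the DLICV mask and crop it to a padded
##    bounding box around the largest connected component, so nnunet does not
##    see the large empty FOV that produced the artifacts
apply_mask('sub01_T1.nii.gz', 'sub01_DLICV.nii.gz', 'sub01_T1_crop.nii.gz')

## 2) Segmentation on the cropped image (placeholder for the actual DLMUSE call)
## run_dlmuse('sub01_T1_crop.nii.gz', 'sub01_DLMUSE_crop.nii.gz')

## 3) Post-processing: pad the cropped labels back to the original grid using the
##    same DLICV bounding box, and add label 1 for ICV voxels without a MUSE label
##    (mainly cortical CSF)
apply_combine('sub01_DLMUSE_crop.nii.gz', 'sub01_DLICV.nii.gz', 'sub01_DLMUSE.nii.gz')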
