update stable and dev documentation
skoudoro committed Dec 14, 2024
1 parent 8543ae5 commit 016e611
Showing 1,291 changed files with 369,226 additions and 95,433 deletions.
@@ -10,12 +10,13 @@
file is usually located in the ``dipy/workflows`` directory.
"""

import os

from dipy.workflows.combined_workflow import CombinedWorkflow

###############################################################################
# ``CombinedWorkflow`` is the base class that will be extended to create our
# combined workflow.

from dipy.workflows.denoise import NLMeansFlow
from dipy.workflows.segment import MedianOtsuFlow

@@ -25,24 +26,27 @@


class DenoiseAndSegment(CombinedWorkflow):

"""
``DenoiseAndSegment`` is the name of our combined workflow. Note that
it needs to extend CombinedWorkflow for everything to work properly.
"""

def _get_sub_flows(self):
return [
NLMeansFlow,
MedianOtsuFlow
]
return [NLMeansFlow, MedianOtsuFlow]

"""
It is mandatory to implement this method if you want to make all the
sub workflows parameters available in commandline.
"""

def run(self, input_files, out_dir='', out_file='processed.nii.gz'):
def run(
self,
input_files,
out_dir="",
out_denoised="processed.nii.gz",
out_mask="brain_mask.nii.gz",
out_masked="dwi_masked.nii.gz",
):
"""
Parameters
----------
@@ -53,8 +57,14 @@ def run(self, input_files, out_dir='', out_file='processed.nii.gz'):
out_dir : string, optional
Where the resulting file will be saved. (default '')
out_file : string, optional
Name of the result file to be saved. (default 'processed.nii.gz')
out_denoised : string, optional
Name of the denoised file to be saved.
out_mask : string, optional
Name of the Otsu mask file to be saved.
out_masked : string, optional
Name of the Otsu masked file to be saved.
"""

"""
@@ -70,13 +80,27 @@ def run(self, input_files, out_dir='', out_file='processed.nii.gz'):

io_it = self.get_io_iterator()

for in_file, out_file in io_it:
for fnames in io_it:
in_fname = fnames[0]
_out_denoised = os.path.basename(fnames[1])
_out_mask = os.path.basename(fnames[2])
_out_masked = os.path.basename(fnames[3])

nl_flow = NLMeansFlow()
self.run_sub_flow(nl_flow, in_file, out_dir=out_dir)
denoised = nl_flow.last_generated_outputs['out_denoised']
self.run_sub_flow(
nl_flow, in_fname, out_dir=out_dir, out_denoised=_out_denoised
)
denoised = nl_flow.last_generated_outputs["out_denoised"]

me_flow = MedianOtsuFlow()
self.run_sub_flow(me_flow, denoised, out_dir=out_dir)
self.run_sub_flow(
me_flow,
denoised,
out_dir=out_dir,
out_mask=_out_mask,
out_masked=_out_masked,
)


###############################################################################
# Use ``self.get_io_iterator()`` in every workflow you create. This creates
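###############################################################################
# A rough usage sketch, not taken from the diff above. The combined workflow
# can be called directly from Python (``DenoiseAndSegment().run(...)``) or
# exposed as a command-line tool with ``run_flow``, which builds the argument
# parser from the docstrings of the combined workflow and of the sub workflows
# returned by ``_get_sub_flows``.

from dipy.workflows.flow_runner import run_flow

if __name__ == "__main__":
    run_flow(DenoiseAndSegment())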
@@ -7,58 +7,58 @@
challenges for diffusion MRI. One proposal for evaluation of tractography
results is to use a forward model that predicts the signal from each of a set
of streamlines, and then fit a linear model to these simultaneous predictions
[Pestilli2014]_.
:footcite:p:`Pestilli2014`.
We will use streamlines generated using probabilistic tracking on CSA
peaks. For brevity, we will include in this example only streamlines going
through the corpus callosum connecting left to right superior frontal
cortex. The process of tracking and finding these streamlines is fully
demonstrated in the :ref:`streamline_tools` example. If this example has been
run, we can read the streamlines from file. Otherwise, we'll run that example
first, by importing it. This provides us with all of the variables that were
created in that example:
demonstrated in the
:ref:`sphx_glr_examples_built_streamline_analysis_streamline_tools.py` example.
If this example has been run, we can read the streamlines from file. Otherwise,
we'll run that example first, by importing it. This provides us with all of the
variables that were created in that example:
"""

from os.path import join as pjoin

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid

import dipy.core.optimize as opt
from dipy.data import fetch_stanford_tracks
from dipy.io.streamline import load_trk
import dipy.tracking.life as life
from dipy.viz import window, actor, colormap as cmap
import numpy as np

# We'll need to know where the corpus callosum is from these variables:
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
import dipy.core.optimize as opt
from dipy.data import fetch_stanford_tracks, get_fnames
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti_data, load_nifti
from dipy.io.image import load_nifti, load_nifti_data
from dipy.io.streamline import load_trk
import dipy.tracking.life as life
from dipy.viz import actor, colormap as cmap, window

hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi')
label_fname = get_fnames('stanford_labels')
t1_fname = get_fnames('stanford_t1')
hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames(name="stanford_hardi")
label_fname = get_fnames(name="stanford_labels")
t1_fname = get_fnames(name="stanford_t1")

data, affine, hardi_img = load_nifti(hardi_fname, return_img=True)
labels = load_nifti_data(label_fname)
t1_data = load_nifti_data(t1_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)
gtab = gradient_table(bvals, bvecs=bvecs)

cc_slice = labels == 2

# Let's now fetch a set of streamlines from the Stanford HARDI dataset.
# Those streamlines were generated during the :ref:`streamline_tools` example.
# Those streamlines were generated during the
# :ref:`sphx_glr_examples_built_streamline_analysis_streamline_tools.py` example.
# Read the candidates from file in voxel space:

streamlines_files = fetch_stanford_tracks()
lr_superiorfrontal_path = pjoin(streamlines_files[1],
'hardi-lr-superiorfrontal.trk')
lr_superiorfrontal_path = pjoin(streamlines_files[1], "hardi-lr-superiorfrontal.trk")

candidate_sl_sft = load_trk(lr_superiorfrontal_path, 'same')
candidate_sl_sft = load_trk(lr_superiorfrontal_path, "same")
candidate_sl_sft.to_vox()
candidate_sl = candidate_sl_sft.streamlines

@@ -73,10 +73,10 @@
# Enables/disables interactive visualization
interactive = False

candidate_streamlines_actor = actor.streamtube(candidate_sl,
cmap.line_colors(candidate_sl))
cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.),
opacity=0.5)
candidate_streamlines_actor = actor.streamtube(
candidate_sl, colors=cmap.line_colors(candidate_sl)
)
cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1.0, 1.0, 0.0), opacity=0.5)

vol_actor = actor.slicer(t1_data)

@@ -90,9 +90,7 @@
scene.add(cc_ROI_actor)
scene.add(vol_actor)
scene.add(vol_actor2)
window.record(scene, n_frames=1,
out_path='life_candidates.png',
size=(800, 800))
window.record(scene=scene, n_frames=1, out_path="life_candidates.png", size=(800, 800))
if interactive:
window.show(scene)

@@ -139,7 +137,7 @@
# the voxels. The expected contributions of the streamline are calculated
# using a forward model, where each node of the streamline is modeled as a
# cylindrical fiber compartment with Gaussian diffusion, using the diffusion
# tensor model. See [Pestilli2014]_ for more detail on the model, and
# tensor model. See :footcite:p:`Pestilli2014` for more detail on the model, and
# variations of this model.

fiber_fit = fiber_model.fit(data, candidate_sl, affine=np.eye(4))
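###############################################################################
# A sketch, not taken from this diff: ``fiber_model`` is created in an elided
# part of the example, presumably as ``life.FiberModel(gtab)``. In matrix
# form, the fit above amounts to a least-squares problem of roughly the form
#
# .. math::
#
#     \hat{\beta} = \arg\min_{\beta \geq 0} \| y - M \beta \|^{2},
#
# where $y$ holds the demeaned, relative diffusion signal in the voxels
# traversed by the candidate streamlines, each column of $M$ contains the
# signal predicted for one streamline by the forward model described above,
# and $\beta_i$ is the weight assigned to streamline $i$ (zero for redundant
# streamlines).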
@@ -151,10 +149,10 @@
# redundant streamlines, and these streamlines will have $\beta_i$ that are 0.

fig, ax = plt.subplots(1)
ax.hist(fiber_fit.beta, bins=100, histtype='step')
ax.set_xlabel('Fiber weights')
ax.set_ylabel('# fibers')
fig.savefig('beta_histogram.png')
ax.hist(fiber_fit.beta, bins=100, histtype="step")
ax.set_xlabel("Fiber weights")
ax.set_ylabel("# fibers")
fig.savefig("beta_histogram.png")

###############################################################################
# .. rst-class:: centered small fst-italic fw-semibold
@@ -166,13 +164,12 @@
# We use $\beta$ to filter out these redundant streamlines, and generate an
# optimized group of streamlines:

optimized_sl = [np.row_stack(candidate_sl)[np.where(fiber_fit.beta > 0)[0]]]
optimized_sl = [np.vstack(candidate_sl)[np.where(fiber_fit.beta > 0)[0]]]
scene = window.Scene()
scene.add(actor.streamtube(optimized_sl, cmap.line_colors(optimized_sl)))
scene.add(actor.streamtube(optimized_sl, colors=cmap.line_colors(optimized_sl)))
scene.add(cc_ROI_actor)
scene.add(vol_actor)
window.record(scene, n_frames=1, out_path='life_optimized.png',
size=(800, 800))
window.record(scene=scene, n_frames=1, out_path="life_optimized.png", size=(800, 800))
if interactive:
window.show(scene)
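###############################################################################
# A quick check, not part of this diff: since redundant candidates receive a
# weight of exactly zero, the size of the optimized set can be compared with
# the number of candidates.

n_kept = int(np.sum(fiber_fit.beta > 0))
print(f"LiFE kept {n_kept} of {len(candidate_sl)} candidate streamlines")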

@@ -215,9 +212,10 @@
# voxel.

beta_baseline = np.zeros(fiber_fit.beta.shape[0])
pred_weighted = np.reshape(opt.spdot(fiber_fit.life_matrix, beta_baseline),
(fiber_fit.vox_coords.shape[0],
np.sum(~gtab.b0s_mask)))
pred_weighted = np.reshape(
opt.spdot(fiber_fit.life_matrix, beta_baseline),
(fiber_fit.vox_coords.shape[0], np.sum(~gtab.b0s_mask)),
)
mean_pred = np.empty((fiber_fit.vox_coords.shape[0], gtab.bvals.shape[0]))
S0 = fiber_fit.b0_signal

@@ -226,10 +224,11 @@
# to add back the mean and then multiply by S0 in every voxel:

mean_pred[..., gtab.b0s_mask] = S0[:, None]
mean_pred[..., ~gtab.b0s_mask] =\
(pred_weighted + fiber_fit.mean_signal[:, None]) * S0[:, None]
mean_pred[..., ~gtab.b0s_mask] = (pred_weighted + fiber_fit.mean_signal[:, None]) * S0[
:, None
]
mean_error = mean_pred - fiber_fit.data
mean_rmse = np.sqrt(np.mean(mean_error ** 2, -1))
mean_rmse = np.sqrt(np.mean(mean_error**2, -1))
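###############################################################################
# ``model_rmse``, used below, is computed in a part of the example that this
# diff does not show. A sketch of that computation, assuming the fitted
# model's ``predict`` method, would look like this:

model_predict = fiber_fit.predict()
model_error = model_predict - fiber_fit.data
model_rmse = np.sqrt(np.mean(model_error**2, -1))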

###############################################################################
# First, we can compare the overall distribution of errors between these two
@@ -239,18 +238,26 @@
# to without the model fit.

fig, ax = plt.subplots(1)
ax.hist(mean_rmse - model_rmse, bins=100, histtype='step')
ax.text(0.2, 0.9, 'Median RMSE, mean model: %.2f' % np.median(mean_rmse),
horizontalalignment='left',
verticalalignment='center',
transform=ax.transAxes)
ax.text(0.2, 0.8, 'Median RMSE, LiFE: %.2f' % np.median(model_rmse),
horizontalalignment='left',
verticalalignment='center',
transform=ax.transAxes)
ax.set_xlabel('RMS Error')
ax.set_ylabel('# voxels')
fig.savefig('error_histograms.png')
ax.hist(mean_rmse - model_rmse, bins=100, histtype="step")
ax.text(
0.2,
0.9,
f"Median RMSE, mean model: {np.median(mean_rmse):.2f}",
horizontalalignment="left",
verticalalignment="center",
transform=ax.transAxes,
)
ax.text(
0.2,
0.8,
f"Median RMSE, LiFE: {np.median(model_rmse):.2f}",
horizontalalignment="left",
verticalalignment="center",
transform=ax.transAxes,
)
ax.set_xlabel("RMS Error")
ax.set_ylabel("# voxels")
fig.savefig("error_histograms.png")

###############################################################################
# .. rst-class:: centered small fst-italic fw-semibold
@@ -265,37 +272,39 @@
# and of the improvement with the model fit:

vol_model = np.ones(data.shape[:3]) * np.nan
vol_model[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = model_rmse
vol_model[
fiber_fit.vox_coords[:, 0], fiber_fit.vox_coords[:, 1], fiber_fit.vox_coords[:, 2]
] = model_rmse
vol_mean = np.ones(data.shape[:3]) * np.nan
vol_mean[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = mean_rmse
vol_mean[
fiber_fit.vox_coords[:, 0], fiber_fit.vox_coords[:, 1], fiber_fit.vox_coords[:, 2]
] = mean_rmse
vol_improve = np.ones(data.shape[:3]) * np.nan
vol_improve[fiber_fit.vox_coords[:, 0],
fiber_fit.vox_coords[:, 1],
fiber_fit.vox_coords[:, 2]] = mean_rmse - model_rmse
vol_improve[
fiber_fit.vox_coords[:, 0], fiber_fit.vox_coords[:, 1], fiber_fit.vox_coords[:, 2]
] = mean_rmse - model_rmse
sl_idx = 49
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95)
ax = AxesGrid(fig, 111,
nrows_ncols=(1, 3),
label_mode="1",
share_all=True,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad="5%")
ax = AxesGrid(
fig,
111,
nrows_ncols=(1, 3),
label_mode="1",
share_all=True,
cbar_location="top",
cbar_mode="each",
cbar_size="10%",
cbar_pad="5%",
)
ax[0].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[0].matshow(np.rot90(vol_model[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[0].colorbar(im)
ax[1].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[1].matshow(np.rot90(vol_mean[sl_idx, :, :]), cmap=matplotlib.cm.hot)
ax.cbar_axes[1].colorbar(im)
ax[2].matshow(np.rot90(t1_data[sl_idx, :, :]), cmap=matplotlib.cm.bone)
im = ax[2].matshow(np.rot90(vol_improve[sl_idx, :, :]),
cmap=matplotlib.cm.RdBu)
im = ax[2].matshow(np.rot90(vol_improve[sl_idx, :, :]), cmap=matplotlib.cm.RdBu)
ax.cbar_axes[2].colorbar(im)
for lax in ax:
lax.set_xticks([])
@@ -327,9 +336,8 @@
# References
# ----------
#
# .. [Pestilli2014] Pestilli, F., Yeatman, J, Rokem, A. Kay, K. and Wandell
# B.A. (2014). Validation and statistical inference in living connectomes.
# Nature Methods 11: 1058-1063. doi:10.1038/nmeth.3098
# .. footbibliography::
#

###############################################################################
# .. include:: ../../links_names.inc