diff --git a/.Rproj.user/6A9A55E1/pcs/files-pane.pper b/.Rproj.user/6A9A55E1/pcs/files-pane.pper
index 8f3f41a..0aff252 100644
--- a/.Rproj.user/6A9A55E1/pcs/files-pane.pper
+++ b/.Rproj.user/6A9A55E1/pcs/files-pane.pper
@@ -5,5 +5,5 @@
       "ascending": true
     }
   ],
-  "path": "~/Desktop/WHIMs"
+  "path": "~/Desktop/WHIMs/R"
 }
\ No newline at end of file
diff --git a/.Rproj.user/6A9A55E1/pcs/windowlayoutstate.pper b/.Rproj.user/6A9A55E1/pcs/windowlayoutstate.pper
index 4bbe760..7587311 100644
--- a/.Rproj.user/6A9A55E1/pcs/windowlayoutstate.pper
+++ b/.Rproj.user/6A9A55E1/pcs/windowlayoutstate.pper
@@ -1,12 +1,12 @@
 {
   "left": {
-    "splitterpos": 351,
+    "splitterpos": 345,
     "topwindowstate": "NORMAL",
     "panelheight": 682,
     "windowheight": 720
   },
   "right": {
-    "splitterpos": 523,
+    "splitterpos": 373,
     "topwindowstate": "NORMAL",
     "panelheight": 682,
     "windowheight": 720
diff --git a/NAMESPACE b/NAMESPACE
index b2f5e85..84e5aa3 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,5 +1,6 @@
 # Generated by roxygen2: do not edit by hand
 
+export(PartitionPipeline)
 export(eve_Fl)
 export(eve_T1)
 export(run_partition_pipeline)
diff --git a/R/PartitionPipeline.R b/R/PartitionPipeline.R
index 92fbced..47434f3 100644
--- a/R/PartitionPipeline.R
+++ b/R/PartitionPipeline.R
@@ -1,5 +1,11 @@
-#Purpose: Make an object for this pipeline, to streamline them
+#' Partition Pipeline for Image Analysis
+#'
+#' This R6 class streamlines the processing pipeline for image analysis,
+#' from initial processing through combining independent variables with
+#' reduced variables by tissue type within each ROI.
+#'
 #' @import R6
+#' @export
PartitionPipeline <- R6Class("PartitionPipeline",
  public = list(
    tind = NULL,
diff --git a/README.Rmd b/README.Rmd
index 9598864..ac1fafe 100644
--- a/README.Rmd
+++ b/README.Rmd
@@ -11,6 +11,11 @@ knitr::opts_chunk$set(echo = TRUE)
 
 [![USC IMAGE](https://raw.githubusercontent.com/USCbiostats/badges/master/tommy-image-badge.svg)](https://image.usc.edu)
 
+## Introduction
+
+NeuroPartitioner is an R package that converts NIfTI-format T1/FL neuroimages into structured, high-dimensional 2D data frames, with processing organized by region of interest (ROI). The package is built around the ‘partition’ algorithm, a fast, flexible framework for agglomerative partitioning based on the Direct-Measure-Reduce approach. Partition ensures that each reduced variable retains a user-specified minimum level of information while remaining interpretable: each original variable maps to one and only one variable in the reduced dataset. The algorithm, detailed in Millstein et al. (2020), allows customization of variable selection, the measure of information loss, and the data reduction method. NeuroPartitioner is aimed at researchers who need efficient, accurate preparation of neuroimaging data for statistical analysis and machine learning.
+
+
 ## Installation Instructions
 
 This document provides detailed steps to install the necessary dependencies for the package. Please follow the instructions carefully to ensure all dependencies are correctly installed.
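The Direct-Measure-Reduce reduction named in this introduction comes from the [partition](https://github.com/USCbiostats/partition) package, which NeuroPartitioner builds on. As a quick illustration of the information-threshold idea, here is a minimal sketch using partition's own simulated-data helper (illustrative only; not part of this diff):

```r
library(partition)

# Simulate three blocks of correlated variables (3, 4, and 5 columns).
set.seed(123)
df <- simulate_block_data(c(3, 4, 5), lower_corr = .4, upper_corr = .6, n = 100)

# Agglomeratively reduce df: each reduced variable must retain at least
# 50% of the information of the original variables it replaces.
prt <- partition(df, threshold = .5)

partition_scores(prt)  # the reduced data frame
mapping_key(prt)       # which original variables map to each reduced variable
```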
diff --git a/README.md b/README.md
index 8bf1a4e..5bd3431 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,22 @@ NeuroPartitioner
 IMAGE](https://raw.githubusercontent.com/USCbiostats/badges/master/tommy-image-badge.svg)](https://image.usc.edu)
 
+## Introduction
+
+NeuroPartitioner is an R package that converts NIfTI-format T1/FL
+neuroimages into structured, high-dimensional 2D data frames, with
+processing organized by region of interest (ROI). The package is built
+around the ‘partition’ algorithm, a fast, flexible framework for
+agglomerative partitioning based on the Direct-Measure-Reduce approach.
+Partition ensures that each reduced variable retains a user-specified
+minimum level of information while remaining interpretable: each
+original variable maps to one and only one variable in the reduced
+dataset. The algorithm, detailed in Millstein et al. (2020), allows
+customization of variable selection, the measure of information loss,
+and the data reduction method. NeuroPartitioner is aimed at researchers
+who need efficient, accurate preparation of neuroimaging data for
+statistical analysis and machine learning.
+
 ## Installation Instructions
 
 This document provides detailed steps to install the necessary
 dependencies for the package. Please follow the instructions
diff --git a/_pkgdown.yml b/_pkgdown.yml
index ad5a18d..a73cb0e 100644
--- a/_pkgdown.yml
+++ b/_pkgdown.yml
@@ -1,4 +1,18 @@
 url: https://jtian123.github.io/WHIMs/
 template:
   bootstrap: 5
+  bootswatch: cosmo # Use the Cosmo theme from Bootswatch
+navbar:
+  title: NeuroPartitioner
+  left:
+    - text: Home
+      href: index.html
+    - text: Get Started
+      href: articles/NeuroPartitioner.html # Link directly to the vignette
+    - text: Reference
+      href: reference/index.html
+  right:
+    - text: View source code
+      href: https://github.com/jtian123/WHIMs
+      icon: fab fa-github
diff --git a/man/PartitionPipeline.Rd b/man/PartitionPipeline.Rd
new file mode 100644
index 0000000..372afd8
--- /dev/null
+++ b/man/PartitionPipeline.Rd
@@ -0,0 +1,135 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/PartitionPipeline.R
+\name{PartitionPipeline}
+\alias{PartitionPipeline}
+\title{Partition Pipeline for Image Analysis}
+\description{
+This R6 class streamlines the processing pipeline for image analysis,
+from initial processing through combining independent variables with
+reduced variables by tissue type within each ROI.
+}
+\section{Methods}{
+\subsection{Public methods}{
+\itemize{
+\item \href{#method-PartitionPipeline-new}{\code{PartitionPipeline$new()}}
+\item \href{#method-PartitionPipeline-iproc}{\code{PartitionPipeline$iproc()}}
+\item \href{#method-PartitionPipeline-supparfun}{\code{PartitionPipeline$supparfun()}}
+\item \href{#method-PartitionPipeline-map_suppar_roi}{\code{PartitionPipeline$map_suppar_roi()}}
+\item \href{#method-PartitionPipeline-parfun}{\code{PartitionPipeline$parfun()}}
+\item \href{#method-PartitionPipeline-tissue_segment}{\code{PartitionPipeline$tissue_segment()}}
+\item \href{#method-PartitionPipeline-Cmb_tissue_type}{\code{PartitionPipeline$Cmb_tissue_type()}}
+\item \href{#method-PartitionPipeline-process_indep_variables}{\code{PartitionPipeline$process_indep_variables()}}
+\item \href{#method-PartitionPipeline-Cmb_indep_with_dep}{\code{PartitionPipeline$Cmb_indep_with_dep()}}
+\item \href{#method-PartitionPipeline-clone}{\code{PartitionPipeline$clone()}}
+}
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-new"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-new}{}}}
+\subsection{Method \code{new()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$new(
+  tind = NULL,
+  nfl = NULL,
+  main_dir = NULL,
+  tissue_type = NULL,
+  outp_volume = TRUE,
+  ICC_thresh_vec = NULL,
+  suppar_thresh_vec = seq(0.7, 1, 0.01),
+  B = 2000,
+  roi = NULL,
+  num_cores = NULL
+)}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-iproc"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-iproc}{}}}
+\subsection{Method \code{iproc()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$iproc()}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-supparfun"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-supparfun}{}}}
+\subsection{Method \code{supparfun()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$supparfun()}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-map_suppar_roi"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-map_suppar_roi}{}}}
+\subsection{Method \code{map_suppar_roi()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$map_suppar_roi()}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-parfun"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-parfun}{}}}
+\subsection{Method \code{parfun()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$parfun()}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-tissue_segment"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-tissue_segment}{}}}
+\subsection{Method \code{tissue_segment()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$tissue_segment()}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-Cmb_tissue_type"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-Cmb_tissue_type}{}}}
+\subsection{Method \code{Cmb_tissue_type()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$Cmb_tissue_type()}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-process_indep_variables"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-process_indep_variables}{}}}
+\subsection{Method \code{process_indep_variables()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$process_indep_variables()}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-Cmb_indep_with_dep"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-Cmb_indep_with_dep}{}}}
+\subsection{Method \code{Cmb_indep_with_dep()}}{
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$Cmb_indep_with_dep()}\if{html}{\out{</div>}}
+}
+
+}
+\if{html}{\out{<hr>}}
+\if{html}{\out{<a id="method-PartitionPipeline-clone"></a>}}
+\if{latex}{\out{\hypertarget{method-PartitionPipeline-clone}{}}}
+\subsection{Method \code{clone()}}{
+The objects of this class are cloneable with this method.
+\subsection{Usage}{
+\if{html}{\out{<div class="r">}}\preformatted{PartitionPipeline$clone(deep = FALSE)}\if{html}{\out{</div>}}
+}
+
+\subsection{Arguments}{
+\if{html}{\out{<div class="arguments">}}
+\describe{
+\item{\code{deep}}{Whether to make a deep clone.}
+}
+\if{html}{\out{</div>}}
+}
+}
+}
diff --git a/vignettes/NeuroPartitioner.Rmd b/vignettes/NeuroPartitioner.Rmd
index d1f0284..c2c290a 100644
--- a/vignettes/NeuroPartitioner.Rmd
+++ b/vignettes/NeuroPartitioner.Rmd
@@ -35,8 +35,9 @@ datatable(lab_df[c("integer_label", "text_label", "structure")],
           caption = 'Interactive table of complete data from the brain regions dataset.')
 ```
 
-## Process T1-weighted Brain MRI Data with FSL and Register to EVE Atlas
+<hr>
 
+## Process T1-weighted Brain MRI Data with FSL and Register to EVE Atlas
 
 Ensure FSL is installed as per the instructions provided in the [package README](https://github.com/jtian123/WHIMs).
@@ -59,16 +60,22 @@ eve_T1(fpath, outpath, fsl_path , fsl_outputtype = "NIFTI_GZ")
 
 **fpath**: A character string specifying the path to one T1-weighted MRI file. The file should be in NIfTI format (.nii.gz). Processing may take some time, so please be patient. For handling multiple MRI files, consider parallel processing with R's parallel computing packages or high-performance computing resources to improve efficiency.
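For instance, several scans can be registered in one pass with base R's parallel package. This is a minimal sketch: the input/output paths and core count are placeholders, and the `eve_T1()` arguments mirror the call shown above.

```r
library(parallel)

fsl_path <- "/usr/local/fsl"  # placeholder: your local FSL installation
t1_files <- list.files("raw_t1", pattern = "\\.nii\\.gz$", full.names = TRUE)

# Register each T1-weighted image to the EVE atlas, one file per worker.
# mclapply() forks and so needs a Unix-alike OS; use parLapply() on Windows.
mclapply(t1_files, function(f) {
  eve_T1(fpath = f,
         outpath = file.path("eve_out", basename(f)),
         fsl_path = fsl_path,
         fsl_outputtype = "NIFTI_GZ")
}, mc.cores = 4)
```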

+<hr>
+
 ## Run Partition Pipeline on Neuroimaging Data
 
 This section describes how to use the pipeline to process neuroimaging data through the sequential application of the algorithms below, with segmentation based on Regions of Interest (ROIs).
+<hr>
+
 ### Pipeline Overview:
 
 - **Super-Partition**: Applies the super-partition algorithm, which uses 3D voxel locations to group data within ROIs.
 - **Partition Algorithm**: Reduces the super-partitioned data with the [Partition algorithm](https://github.com/USCbiostats/partition), collapsing correlated variables into interpretable reduced variables.
 - **Tissue Segmentation**: Segments the processed data by tissue type within each ROI.
+<hr>
+
 ### Practical Example:
 
 To process the ROI named "inferior_frontal_gyrus", identify the corresponding `tind` (in this example, `tind = 5`) from the **Region Labels and Structures** section. You will also need to set up a directory (`main_dir`) to hold all processing files and datasets. Note that the pipeline does not return its outputs directly; they are stored at the following locations (a usage sketch follows this list):
@@ -76,6 +83,8 @@ To process the ROI named "inferior_frontal_gyrus", identify the corresponding `t
 
 - Intensity data: **/main_dir/partition/roi/thresh/tissue_type/cmb/intensities_whole.rds**
 - Volume data: **/main_dir/partition/roi/thresh/tissue_type/cmb/volume_whole.rds**
 
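A construction-and-run sketch for this example is below. The argument names come from `PartitionPipeline$new()` in the class documentation; `main_dir`, `tissue_type`, `ICC_thresh_vec`, and `num_cores` values are illustrative placeholders, and the methods are called in the order they are listed in `man/PartitionPipeline.Rd`. The exported `run_partition_pipeline()` is assumed to wrap these same steps.

```r
library(NeuroPartitioner)

# Pipeline for tind = 5 ("inferior_frontal_gyrus").
pipe <- PartitionPipeline$new(
  tind              = 5,
  main_dir          = "~/whims_run",      # placeholder working directory
  tissue_type       = "GM",               # placeholder tissue label
  outp_volume       = TRUE,
  ICC_thresh_vec    = c(0.8, 0.9),        # placeholder ICC thresholds
  suppar_thresh_vec = seq(0.7, 1, 0.01),  # constructor default
  B                 = 2000,               # constructor default
  num_cores         = 4
)

# Steps in documented order; outputs are written under main_dir, not returned.
pipe$iproc()                    # initial processing
pipe$supparfun()                # super-partition
pipe$map_suppar_roi()           # map super-partition groups to ROIs
pipe$parfun()                   # partition algorithm
pipe$tissue_segment()           # tissue segmentation
pipe$Cmb_tissue_type()          # combine by tissue type
pipe$process_indep_variables()  # process independent variables
pipe$Cmb_indep_with_dep()       # combine independent with reduced variables
```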
+<hr>
+
 ### Parallel Processing:
 
 This function supports parallel processing; users can specify the number of cores to use through `num_cores`. Increasing the number of cores speeds up the Partition step roughly in proportion, which can yield significant time savings for large datasets.
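When in doubt about `num_cores`, a common conservative default is shown below (a generic sketch using base R's parallel package):

```r
library(parallel)

# Leave one core free for the operating system and other processes.
n_cores <- max(1L, detectCores() - 1L)
```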