diff --git a/nimare/meta/ibma.py b/nimare/meta/ibma.py index 6aba68ddd..4d19aa969 100755 --- a/nimare/meta/ibma.py +++ b/nimare/meta/ibma.py @@ -3,6 +3,7 @@ from __future__ import division import logging +from collections import Counter import nibabel as nib import numpy as np @@ -104,8 +105,6 @@ def _preprocess_input(self, dataset): # A dictionary to collect data, to be further reduced by the liberal mask. self.inputs_["data_bags"] = {} - # A dictionary to collect masked image data, to be further reduced by the aggressive mask. - temp_image_inputs = {} for name, (type_, _) in self._required_inputs.items(): if type_ == "image": # Resampling will only occur if shape/affines are different @@ -124,7 +123,8 @@ def _preprocess_input(self, dataset): # Mask required input images using either the dataset's mask or the estimator's. temp_arr = masker.transform(img4d) - temp_image_inputs[name] = temp_arr + # To save memory, we only save the original image array and perform masking later. + self.inputs_[name] = temp_arr if self.aggressive_mask: # An intermediate step to mask out bad voxels. # Can be dropped once PyMARE is able to handle masked arrays or missing data. @@ -141,7 +141,6 @@ def _preprocess_input(self, dataset): good_voxels_bool, ) else: - self.inputs_[name] = temp_arr data_bags = zip(*_apply_liberal_mask(temp_arr)) keys = ["values", "voxel_mask", "study_mask"] @@ -150,20 +149,14 @@ def _preprocess_input(self, dataset): # Further reduce image-based inputs to remove "bad" voxels # (voxels with zeros or NaNs in any studies) if "aggressive_mask" in self.inputs_.keys(): - n_bad_voxels = ( + if n_bad_voxels := ( self.inputs_["aggressive_mask"].size - self.inputs_["aggressive_mask"].sum() - ) - if n_bad_voxels: + ): LGR.warning( f"Masking out {n_bad_voxels} additional voxels. " "The updated masker is available in the Estimator.masker attribute." 
) - for name, raw_masked_data in temp_image_inputs.items(): - self.inputs_[name] = raw_masked_data[:, self.inputs_["aggressive_mask"]] - - self.inputs_["raw_data"] = temp_image_inputs # This data is saved only to use in Reports - class Fishers(IBMAEstimator): """An image-based meta-analytic test using t- or z-statistic images. @@ -175,8 +168,8 @@ class Fishers(IBMAEstimator): .. versionchanged:: 0.2.3 * New parameter: ``two_sided``, controls the type of test to be performed. In addition, - the default is now set to True (two-sided), which differs from previous versions - where only one-sided tests were performed. + the default is now set to True (two-sided), which differs from previous versions + where only one-sided tests were performed. .. versionchanged:: 0.2.1 @@ -245,6 +238,21 @@ def _generate_description(self): ) return description + def _fit_model(self, stat_maps): + """Fit the model to the data.""" + n_studies, n_voxels = stat_maps.shape + + pymare_dset = pymare.Dataset(y=stat_maps) + est = pymare.estimators.FisherCombinationTest(mode=self._mode) + est.fit_dataset(pymare_dset) + est_summary = est.summary() + + z_map = est_summary.z.squeeze() + p_map = est_summary.p.squeeze() + dof_map = np.tile(n_studies - 1, n_voxels).astype(np.int32) + + return z_map, p_map, dof_map + def _fit(self, dataset): self.dataset = dataset self.masker = self.masker or dataset.masker @@ -257,32 +265,23 @@ def _fit(self, dataset): ) if self.aggressive_mask: - pymare_dset = pymare.Dataset(y=self.inputs_["z_maps"]) - est = pymare.estimators.FisherCombinationTest(mode=self._mode) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - - z_map = _boolean_unmask(est_summary.z.squeeze(), self.inputs_["aggressive_mask"]) - p_map = _boolean_unmask(est_summary.p.squeeze(), self.inputs_["aggressive_mask"]) - dof_map = np.tile( - self.inputs_["z_maps"].shape[0] - 1, - self.inputs_["z_maps"].shape[1], - ).astype(np.int32) - dof_map = _boolean_unmask(dof_map, 
self.inputs_["aggressive_mask"]) + voxel_mask = self.inputs_["aggressive_mask"] + result_maps = self._fit_model(self.inputs_["z_maps"][:, voxel_mask]) + z_map, p_map, dof_map = tuple( + map(lambda x: _boolean_unmask(x, voxel_mask), result_maps) + ) else: - n_total_voxels = self.inputs_["z_maps"].shape[1] - z_map = np.zeros(n_total_voxels, dtype=float) - p_map = np.zeros(n_total_voxels, dtype=float) - dof_map = np.zeros(n_total_voxels, dtype=np.int32) + n_voxels = self.inputs_["z_maps"].shape[1] + z_map = np.zeros(n_voxels, dtype=float) + p_map = np.zeros(n_voxels, dtype=float) + dof_map = np.zeros(n_voxels, dtype=np.int32) for bag in self.inputs_["data_bags"]["z_maps"]: - pymare_dset = pymare.Dataset(y=bag["values"]) - est = pymare.estimators.FisherCombinationTest(mode=self._mode) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - z_map[bag["voxel_mask"]] = est_summary.z.squeeze() - p_map[bag["voxel_mask"]] = est_summary.p.squeeze() - dof_map[bag["voxel_mask"]] = bag["values"].shape[0] - 1 + ( + z_map[bag["voxel_mask"]], + p_map[bag["voxel_mask"]], + dof_map[bag["voxel_mask"]], + ) = self._fit_model(bag["values"]) maps = {"z": z_map, "p": p_map, "dof": dof_map} description = self._generate_description() @@ -300,9 +299,11 @@ class Stouffers(IBMAEstimator): .. versionchanged:: 0.2.3 * New parameter: ``two_sided``, controls the type of test to be performed. In addition, - the default is now set to True (two-sided), which differs from previous versions - where only one-sided tests were performed. + the default is now set to True (two-sided), which differs from previous versions + where only one-sided tests were performed. * Add correction for multiple contrasts within a study. + * New parameter: ``normalize_contrast_weights`` to normalize the weights by the + number of contrasts in each study. .. 
versionchanged:: 0.2.1 @@ -320,6 +321,9 @@ class Stouffers(IBMAEstimator): Whether to use sample sizes for weights (i.e., "weighted Stouffer's") or not, as described in :footcite:t:`zaykin2011optimally`. Default is False. + normalize_contrast_weights : :obj:`bool`, optional + Whether to use the number of contrasts per study to normalize the weights or not. + Default is False. two_sided : :obj:`bool`, optional If True, performs an unsigned t-test. Both positive and negative effects are considered; the null hypothesis is that the effect is zero. If False, only positive effects are @@ -361,15 +365,37 @@ class Stouffers(IBMAEstimator): _required_inputs = {"z_maps": ("image", "z")} - def __init__(self, use_sample_size=False, two_sided=True, **kwargs): + def __init__( + self, + use_sample_size=False, + normalize_contrast_weights=False, + two_sided=True, + **kwargs, + ): super().__init__(**kwargs) self.use_sample_size = use_sample_size if self.use_sample_size: self._required_inputs["sample_sizes"] = ("metadata", "sample_sizes") + self.normalize_contrast_weights = normalize_contrast_weights + self.two_sided = two_sided self._mode = "concordant" if self.two_sided else "directed" + def _preprocess_input(self, dataset): + """Preprocess additional inputs to the Estimator from the Dataset as needed.""" + super()._preprocess_input(dataset) + + study_mask = dataset.images["id"].isin(self.inputs_["id"]) + + # Convert each contrast name to a unique integer value. 
+ labels = dataset.images["study_id"][study_mask].to_list() + label_to_int = {label: i for i, label in enumerate(set(labels))} + label_counts = Counter(labels) + + self.inputs_["contrast_names"] = np.array([label_to_int[label] for label in labels]) + self.inputs_["num_contrasts"] = np.array([label_counts[label] for label in labels]) + def _generate_description(self): description = ( f"An image-based meta-analysis was performed with NiMARE {__version__} " @@ -388,10 +414,44 @@ def _generate_description(self): return description - def _group_encoder(self, labels): - """Convert each group to a unique integer value.""" - label_to_int = {label: i for i, label in enumerate(set(labels))} - return np.array([label_to_int[label] for label in labels]) + def _fit_model(self, stat_maps, study_mask=None, corr=None): + """Fit the model to the data.""" + n_studies, n_voxels = stat_maps.shape + + if study_mask is None: + # If no mask is provided, assume all studies are included. This is always the case + # when using the aggressive mask. 
+ study_mask = np.arange(n_studies) + + est = pymare.estimators.StoufferCombinationTest(mode=self._mode) + + contrast_maps, sub_corr = None, None + if corr is not None: + contrast_maps = np.tile(self.inputs_["contrast_names"][study_mask], (n_voxels, 1)).T + sub_corr = corr[np.ix_(study_mask, study_mask)] + + weights = np.ones(n_studies) + + if self.normalize_contrast_weights: + weights *= 1 / self.inputs_["num_contrasts"][study_mask] + + if self.use_sample_size: + sample_sizes = np.array( + [np.mean(self.inputs_["sample_sizes"][idx]) for idx in study_mask] + ) + weights *= np.sqrt(sample_sizes) + + weight_maps = np.tile(weights, (n_voxels, 1)).T + + pymare_dset = pymare.Dataset(y=stat_maps, n=weight_maps, v=contrast_maps) + est.fit_dataset(pymare_dset, corr=sub_corr) + est_summary = est.summary() + + z_map = est_summary.z.squeeze() + p_map = est_summary.p.squeeze() + dof_map = np.tile(n_studies - 1, n_voxels).astype(np.int32) + + return z_map, p_map, dof_map def _fit(self, dataset): self.dataset = dataset @@ -404,69 +464,33 @@ def _fit(self, dataset): "will produce invalid results." 
) - groups = self._group_encoder(self.dataset.images["study_id"].to_list()) - - _get_group_maps = False - corr, sub_corr, group_maps = None, None, None - if groups.size != np.unique(groups).size: + corr = None + if self.inputs_["contrast_names"].size != np.unique(self.inputs_["contrast_names"]).size: # If all studies are not unique, we will need to correct for multiple contrasts - _get_group_maps = True - corr = np.corrcoef(self.inputs_["raw_data"]["z_maps"], rowvar=True) + corr = np.corrcoef(self.inputs_["z_maps"], rowvar=True) if self.aggressive_mask: - est = pymare.estimators.StoufferCombinationTest(mode=self._mode) - - if _get_group_maps: - id_mask = self.dataset.images["id"].isin(self.inputs_["id"]) - group_maps = np.tile(groups[id_mask], (self.inputs_["z_maps"].shape[1], 1)).T - - if self.use_sample_size: - sample_sizes = np.array([np.mean(n) for n in self.inputs_["sample_sizes"]]) - weights = np.sqrt(sample_sizes) - weight_maps = np.tile(weights, (self.inputs_["z_maps"].shape[1], 1)).T - pymare_dset = pymare.Dataset(y=self.inputs_["z_maps"], n=weight_maps, v=group_maps) - else: - pymare_dset = pymare.Dataset(y=self.inputs_["z_maps"], v=group_maps) - - est.fit_dataset(pymare_dset, corr=corr) - est_summary = est.summary() - - z_map = _boolean_unmask(est_summary.z.squeeze(), self.inputs_["aggressive_mask"]) - p_map = _boolean_unmask(est_summary.p.squeeze(), self.inputs_["aggressive_mask"]) - dof_map = np.tile( - self.inputs_["z_maps"].shape[0] - 1, self.inputs_["z_maps"].shape[1] - ).astype(np.int32) - dof_map = _boolean_unmask(dof_map, self.inputs_["aggressive_mask"]) + voxel_mask = self.inputs_["aggressive_mask"] + result_maps = self._fit_model( + self.inputs_["z_maps"][:, voxel_mask], + corr=corr, + ) + + z_map, p_map, dof_map = tuple( + map(lambda x: _boolean_unmask(x, voxel_mask), result_maps) + ) else: - n_total_voxels = self.inputs_["z_maps"].shape[1] - z_map = np.zeros(n_total_voxels, dtype=float) - p_map = np.zeros(n_total_voxels, dtype=float) - 
dof_map = np.zeros(n_total_voxels, dtype=np.int32) + n_voxels = self.inputs_["z_maps"].shape[1] + z_map = np.zeros(n_voxels, dtype=float) + p_map = np.zeros(n_voxels, dtype=float) + dof_map = np.zeros(n_voxels, dtype=np.int32) for bag in self.inputs_["data_bags"]["z_maps"]: - est = pymare.estimators.StoufferCombinationTest(mode=self._mode) - - study_mask = bag["study_mask"] - - if _get_group_maps: - # Normally, we expect studies from the same group to be in the same bag. - group_maps = np.tile(groups[study_mask], (bag["values"].shape[1], 1)).T - sub_corr = corr[np.ix_(study_mask, study_mask)] - - if self.use_sample_size: - sample_sizes_ex = [self.inputs_["sample_sizes"][study] for study in study_mask] - sample_sizes = np.array([np.mean(n) for n in sample_sizes_ex]) - weights = np.sqrt(sample_sizes) - weight_maps = np.tile(weights, (bag["values"].shape[1], 1)).T - pymare_dset = pymare.Dataset(y=bag["values"], n=weight_maps, v=group_maps) - else: - pymare_dset = pymare.Dataset(y=bag["values"], v=group_maps) - - est.fit_dataset(pymare_dset, corr=sub_corr) - est_summary = est.summary() - z_map[bag["voxel_mask"]] = est_summary.z.squeeze() - p_map[bag["voxel_mask"]] = est_summary.p.squeeze() - dof_map[bag["voxel_mask"]] = bag["values"].shape[0] - 1 + ( + z_map[bag["voxel_mask"]], + p_map[bag["voxel_mask"]], + dof_map[bag["voxel_mask"]], + ) = self._fit_model(bag["values"], bag["study_mask"], corr=corr) maps = {"z": z_map, "p": p_map, "dof": dof_map} description = self._generate_description() @@ -561,6 +585,24 @@ def _generate_description(self): ) return description + def _fit_model(self, beta_maps, varcope_maps): + """Fit the model to the data.""" + n_studies, n_voxels = beta_maps.shape + + pymare_dset = pymare.Dataset(y=beta_maps, v=varcope_maps) + est = pymare.estimators.WeightedLeastSquares(tau2=self.tau2) + est.fit_dataset(pymare_dset) + est_summary = est.summary() + + fe_stats = est_summary.get_fe_stats() + z_map = fe_stats["z"].squeeze() + p_map = 
fe_stats["p"].squeeze() + est_map = fe_stats["est"].squeeze() + se_map = fe_stats["se"].squeeze() + dof_map = np.tile(n_studies - 1, n_voxels).astype(np.int32) + + return z_map, p_map, est_map, se_map, dof_map + def _fit(self, dataset): self.dataset = dataset self.masker = self.masker or dataset.masker @@ -572,49 +614,36 @@ def _fit(self, dataset): ) if self.aggressive_mask: - pymare_dset = pymare.Dataset( - y=self.inputs_["beta_maps"], v=self.inputs_["varcope_maps"] + voxel_mask = self.inputs_["aggressive_mask"] + result_maps = self._fit_model( + self.inputs_["beta_maps"][:, voxel_mask], + self.inputs_["varcope_maps"][:, voxel_mask], ) - est = pymare.estimators.WeightedLeastSquares(tau2=self.tau2) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - - fe_stats = est_summary.get_fe_stats() - z_map = _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]) - p_map = _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]) - est_map = _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]) - se_map = _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]) - dof_map = np.tile( - self.inputs_["beta_maps"].shape[0] - 1, self.inputs_["beta_maps"].shape[1] - ).astype(np.int32) - dof_map = _boolean_unmask(dof_map, self.inputs_["aggressive_mask"]) + z_map, p_map, est_map, se_map, dof_map = tuple( + map(lambda x: _boolean_unmask(x, voxel_mask), result_maps) + ) else: - n_total_voxels = self.inputs_["beta_maps"].shape[1] - z_map = np.zeros(n_total_voxels, dtype=float) - p_map = np.zeros(n_total_voxels, dtype=float) - est_map = np.zeros(n_total_voxels, dtype=float) - se_map = np.zeros(n_total_voxels, dtype=float) - dof_map = np.zeros(n_total_voxels, dtype=np.int32) + n_voxels = self.inputs_["beta_maps"].shape[1] + z_map = np.zeros(n_voxels, dtype=float) + p_map = np.zeros(n_voxels, dtype=float) + est_map = np.zeros(n_voxels, dtype=float) + se_map = np.zeros(n_voxels, dtype=float) + dof_map = 
np.zeros(n_voxels, dtype=np.int32) + beta_bags = self.inputs_["data_bags"]["beta_maps"] varcope_bags = self.inputs_["data_bags"]["varcope_maps"] for beta_bag, varcope_bag in zip(beta_bags, varcope_bags): - pymare_dset = pymare.Dataset(y=beta_bag["values"], v=varcope_bag["values"]) - est = pymare.estimators.WeightedLeastSquares(tau2=self.tau2) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - - fe_stats = est_summary.get_fe_stats() - z_map[beta_bag["voxel_mask"]] = fe_stats["z"].squeeze() - p_map[beta_bag["voxel_mask"]] = fe_stats["p"].squeeze() - est_map[beta_bag["voxel_mask"]] = fe_stats["est"].squeeze() - se_map[beta_bag["voxel_mask"]] = fe_stats["se"].squeeze() - dof_map[beta_bag["voxel_mask"]] = beta_bag["values"].shape[0] - 1 + ( + z_map[beta_bag["voxel_mask"]], + p_map[beta_bag["voxel_mask"]], + est_map[beta_bag["voxel_mask"]], + se_map[beta_bag["voxel_mask"]], + dof_map[beta_bag["voxel_mask"]], + ) = self._fit_model(beta_bag["values"], varcope_bag["values"]) # tau2 is a float, not a map, so it can't go into the results dictionary - tables = { - "level-estimator": pd.DataFrame(columns=["tau2"], data=[self.tau2]), - } + tables = {"level-estimator": pd.DataFrame(columns=["tau2"], data=[self.tau2])} maps = {"z": z_map, "p": p_map, "est": est_map, "se": se_map, "dof": dof_map} description = self._generate_description() @@ -700,6 +729,25 @@ def _generate_description(self): ) return description + def _fit_model(self, beta_maps, varcope_maps): + """Fit the model to the data.""" + n_studies, n_voxels = beta_maps.shape + + pymare_dset = pymare.Dataset(y=beta_maps, v=varcope_maps) + est = pymare.estimators.DerSimonianLaird() + est.fit_dataset(pymare_dset) + est_summary = est.summary() + + fe_stats = est_summary.get_fe_stats() + z_map = fe_stats["z"].squeeze() + p_map = fe_stats["p"].squeeze() + est_map = fe_stats["est"].squeeze() + se_map = fe_stats["se"].squeeze() + tau2_map = est_summary.tau2.squeeze() + dof_map = np.tile(n_studies - 1, 
n_voxels).astype(np.int32) + + return z_map, p_map, est_map, se_map, tau2_map, dof_map + def _fit(self, dataset): self.dataset = dataset self.masker = self.masker or dataset.masker @@ -711,47 +759,35 @@ def _fit(self, dataset): ) if self.aggressive_mask: - est = pymare.estimators.DerSimonianLaird() - pymare_dset = pymare.Dataset( - y=self.inputs_["beta_maps"], v=self.inputs_["varcope_maps"] + voxel_mask = self.inputs_["aggressive_mask"] + result_maps = self._fit_model( + self.inputs_["beta_maps"][:, voxel_mask], + self.inputs_["varcope_maps"][:, voxel_mask], ) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - - fe_stats = est_summary.get_fe_stats() - z_map = _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]) - p_map = _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]) - est_map = _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]) - se_map = _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]) - tau2_map = _boolean_unmask(est_summary.tau2.squeeze(), self.inputs_["aggressive_mask"]) - dof_map = np.tile( - self.inputs_["beta_maps"].shape[0] - 1, self.inputs_["beta_maps"].shape[1] - ).astype(np.int32) - dof_map = _boolean_unmask(dof_map, self.inputs_["aggressive_mask"]) + z_map, p_map, est_map, se_map, tau2_map, dof_map = tuple( + map(lambda x: _boolean_unmask(x, voxel_mask), result_maps) + ) else: - n_total_voxels = self.inputs_["beta_maps"].shape[1] - z_map = np.zeros(n_total_voxels, dtype=float) - p_map = np.zeros(n_total_voxels, dtype=float) - est_map = np.zeros(n_total_voxels, dtype=float) - se_map = np.zeros(n_total_voxels, dtype=float) - tau2_map = np.zeros(n_total_voxels, dtype=float) - dof_map = np.zeros(n_total_voxels, dtype=np.int32) + n_voxels = self.inputs_["beta_maps"].shape[1] + z_map = np.zeros(n_voxels, dtype=float) + p_map = np.zeros(n_voxels, dtype=float) + est_map = np.zeros(n_voxels, dtype=float) + se_map = np.zeros(n_voxels, dtype=float) + 
tau2_map = np.zeros(n_voxels, dtype=float) + dof_map = np.zeros(n_voxels, dtype=np.int32) + beta_bags = self.inputs_["data_bags"]["beta_maps"] varcope_bags = self.inputs_["data_bags"]["varcope_maps"] for beta_bag, varcope_bag in zip(beta_bags, varcope_bags): - est = pymare.estimators.DerSimonianLaird() - pymare_dset = pymare.Dataset(y=beta_bag["values"], v=varcope_bag["values"]) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - - fe_stats = est_summary.get_fe_stats() - z_map[beta_bag["voxel_mask"]] = fe_stats["z"].squeeze() - p_map[beta_bag["voxel_mask"]] = fe_stats["p"].squeeze() - est_map[beta_bag["voxel_mask"]] = fe_stats["est"].squeeze() - se_map[beta_bag["voxel_mask"]] = fe_stats["se"].squeeze() - tau2_map[beta_bag["voxel_mask"]] = est_summary.tau2.squeeze() - dof_map[beta_bag["voxel_mask"]] = beta_bag["values"].shape[0] - 1 + ( + z_map[beta_bag["voxel_mask"]], + p_map[beta_bag["voxel_mask"]], + est_map[beta_bag["voxel_mask"]], + se_map[beta_bag["voxel_mask"]], + tau2_map[beta_bag["voxel_mask"]], + dof_map[beta_bag["voxel_mask"]], + ) = self._fit_model(beta_bag["values"], varcope_bag["values"]) maps = { "z": z_map, @@ -844,6 +880,25 @@ def _generate_description(self): ) return description + def _fit_model(self, beta_maps, varcope_maps): + """Fit the model to the data.""" + n_studies, n_voxels = beta_maps.shape + + pymare_dset = pymare.Dataset(y=beta_maps, v=varcope_maps) + est = pymare.estimators.Hedges() + est.fit_dataset(pymare_dset) + est_summary = est.summary() + + fe_stats = est_summary.get_fe_stats() + z_map = fe_stats["z"].squeeze() + p_map = fe_stats["p"].squeeze() + est_map = fe_stats["est"].squeeze() + se_map = fe_stats["se"].squeeze() + tau2_map = est_summary.tau2.squeeze() + dof_map = np.tile(n_studies - 1, n_voxels).astype(np.int32) + + return z_map, p_map, est_map, se_map, tau2_map, dof_map + def _fit(self, dataset): self.dataset = dataset self.masker = self.masker or dataset.masker @@ -855,47 +910,35 @@ def _fit(self, dataset): ) if 
self.aggressive_mask: - est = pymare.estimators.Hedges() - pymare_dset = pymare.Dataset( - y=self.inputs_["beta_maps"], v=self.inputs_["varcope_maps"] + voxel_mask = self.inputs_["aggressive_mask"] + result_maps = self._fit_model( + self.inputs_["beta_maps"][:, voxel_mask], + self.inputs_["varcope_maps"][:, voxel_mask], ) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - fe_stats = est_summary.get_fe_stats() - - z_map = _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]) - p_map = _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]) - est_map = _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]) - se_map = _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]) - tau2_map = _boolean_unmask(est_summary.tau2.squeeze(), self.inputs_["aggressive_mask"]) - dof_map = np.tile( - self.inputs_["beta_maps"].shape[0] - 1, self.inputs_["beta_maps"].shape[1] - ).astype(np.int32) - dof_map = _boolean_unmask(dof_map, self.inputs_["aggressive_mask"]) + z_map, p_map, est_map, se_map, tau2_map, dof_map = tuple( + map(lambda x: _boolean_unmask(x, voxel_mask), result_maps) + ) else: - n_total_voxels = self.inputs_["beta_maps"].shape[1] - z_map = np.zeros(n_total_voxels, dtype=float) - p_map = np.zeros(n_total_voxels, dtype=float) - est_map = np.zeros(n_total_voxels, dtype=float) - se_map = np.zeros(n_total_voxels, dtype=float) - tau2_map = np.zeros(n_total_voxels, dtype=float) - dof_map = np.zeros(n_total_voxels, dtype=np.int32) + n_voxels = self.inputs_["beta_maps"].shape[1] + z_map = np.zeros(n_voxels, dtype=float) + p_map = np.zeros(n_voxels, dtype=float) + est_map = np.zeros(n_voxels, dtype=float) + se_map = np.zeros(n_voxels, dtype=float) + tau2_map = np.zeros(n_voxels, dtype=float) + dof_map = np.zeros(n_voxels, dtype=np.int32) + beta_bags = self.inputs_["data_bags"]["beta_maps"] varcope_bags = self.inputs_["data_bags"]["varcope_maps"] for beta_bag, varcope_bag in zip(beta_bags, 
varcope_bags): - est = pymare.estimators.Hedges() - pymare_dset = pymare.Dataset(y=beta_bag["values"], v=varcope_bag["values"]) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - fe_stats = est_summary.get_fe_stats() - - z_map[beta_bag["voxel_mask"]] = fe_stats["z"].squeeze() - p_map[beta_bag["voxel_mask"]] = fe_stats["p"].squeeze() - est_map[beta_bag["voxel_mask"]] = fe_stats["est"].squeeze() - se_map[beta_bag["voxel_mask"]] = fe_stats["se"].squeeze() - tau2_map[beta_bag["voxel_mask"]] = est_summary.tau2.squeeze() - dof_map[beta_bag["voxel_mask"]] = beta_bag["values"].shape[0] - 1 + ( + z_map[beta_bag["voxel_mask"]], + p_map[beta_bag["voxel_mask"]], + est_map[beta_bag["voxel_mask"]], + se_map[beta_bag["voxel_mask"]], + tau2_map[beta_bag["voxel_mask"]], + dof_map[beta_bag["voxel_mask"]], + ) = self._fit_model(beta_bag["values"], varcope_bag["values"]) maps = { "z": z_map, @@ -1004,61 +1047,67 @@ def _generate_description(self): ) return description + def _fit_model(self, beta_maps, study_mask=None): + """Fit the model to the data.""" + n_studies, n_voxels = beta_maps.shape + + if study_mask is None: + # If no mask is provided, assume all studies are included. This is always the case + # when using the aggressive mask. 
+ study_mask = np.arange(n_studies) + + sample_sizes = np.array([np.mean(self.inputs_["sample_sizes"][idx]) for idx in study_mask]) + n_maps = np.tile(sample_sizes, (n_voxels, 1)).T + + pymare_dset = pymare.Dataset(y=beta_maps, n=n_maps) + est = pymare.estimators.SampleSizeBasedLikelihoodEstimator(method=self.method) + est.fit_dataset(pymare_dset) + est_summary = est.summary() + fe_stats = est_summary.get_fe_stats() + + z_map = fe_stats["z"].squeeze() + p_map = fe_stats["p"].squeeze() + est_map = fe_stats["est"].squeeze() + se_map = fe_stats["se"].squeeze() + tau2_map = est_summary.tau2.squeeze() + sigma2_map = est.params_["sigma2"].squeeze() + dof_map = np.tile(n_studies - 1, n_voxels).astype(np.int32) + + return z_map, p_map, est_map, se_map, tau2_map, sigma2_map, dof_map + def _fit(self, dataset): self.dataset = dataset self.masker = self.masker or dataset.masker if self.aggressive_mask: - sample_sizes = np.array([np.mean(n) for n in self.inputs_["sample_sizes"]]) - n_maps = np.tile(sample_sizes, (self.inputs_["beta_maps"].shape[1], 1)).T - - pymare_dset = pymare.Dataset(y=self.inputs_["beta_maps"], n=n_maps) - est = pymare.estimators.SampleSizeBasedLikelihoodEstimator(method=self.method) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - fe_stats = est_summary.get_fe_stats() - - z_map = _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]) - p_map = _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]) - est_map = _boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]) - se_map = _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]) - tau2_map = _boolean_unmask(est_summary.tau2.squeeze(), self.inputs_["aggressive_mask"]) - sigma2_map = _boolean_unmask( - est.params_["sigma2"].squeeze(), self.inputs_["aggressive_mask"] + voxel_mask = self.inputs_["aggressive_mask"] + result_maps = self._fit_model( + self.inputs_["beta_maps"][:, voxel_mask], ) - dof_map = np.tile( - 
self.inputs_["beta_maps"].shape[0] - 1, self.inputs_["beta_maps"].shape[1] - ).astype(np.int32) - dof_map = _boolean_unmask(dof_map, self.inputs_["aggressive_mask"]) + z_map, p_map, est_map, se_map, tau2_map, sigma2_map, dof_map = tuple( + map(lambda x: _boolean_unmask(x, voxel_mask), result_maps) + ) else: - n_total_voxels = self.inputs_["beta_maps"].shape[1] - z_map = np.zeros(n_total_voxels, dtype=float) - p_map = np.zeros(n_total_voxels, dtype=float) - est_map = np.zeros(n_total_voxels, dtype=float) - se_map = np.zeros(n_total_voxels, dtype=float) - tau2_map = np.zeros(n_total_voxels, dtype=float) - sigma2_map = np.zeros(n_total_voxels, dtype=float) - dof_map = np.zeros(n_total_voxels, dtype=np.int32) + n_voxels = self.inputs_["beta_maps"].shape[1] + z_map = np.zeros(n_voxels, dtype=float) + p_map = np.zeros(n_voxels, dtype=float) + est_map = np.zeros(n_voxels, dtype=float) + se_map = np.zeros(n_voxels, dtype=float) + tau2_map = np.zeros(n_voxels, dtype=float) + sigma2_map = np.zeros(n_voxels, dtype=float) + dof_map = np.zeros(n_voxels, dtype=np.int32) + for bag in self.inputs_["data_bags"]["beta_maps"]: - study_mask = bag["study_mask"] - sample_sizes_ex = [self.inputs_["sample_sizes"][study] for study in study_mask] - sample_sizes = np.array([np.mean(n) for n in sample_sizes_ex]) - n_maps = np.tile(sample_sizes, (bag["values"].shape[1], 1)).T - - pymare_dset = pymare.Dataset(y=bag["values"], n=n_maps) - est = pymare.estimators.SampleSizeBasedLikelihoodEstimator(method=self.method) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - fe_stats = est_summary.get_fe_stats() - - z_map[bag["voxel_mask"]] = fe_stats["z"].squeeze() - p_map[bag["voxel_mask"]] = fe_stats["p"].squeeze() - est_map[bag["voxel_mask"]] = fe_stats["est"].squeeze() - se_map[bag["voxel_mask"]] = fe_stats["se"].squeeze() - tau2_map[bag["voxel_mask"]] = est_summary.tau2.squeeze() - sigma2_map[bag["voxel_mask"]] = est.params_["sigma2"].squeeze() - dof_map[bag["voxel_mask"]] = 
bag["values"].shape[0] - 1 + ( + z_map[bag["voxel_mask"]], + p_map[bag["voxel_mask"]], + est_map[bag["voxel_mask"]], + se_map[bag["voxel_mask"]], + tau2_map[bag["voxel_mask"]], + sigma2_map[bag["voxel_mask"]], + dof_map[bag["voxel_mask"]], + ) = self._fit_model(bag["values"], bag["study_mask"]) maps = { "z": z_map, @@ -1172,6 +1221,25 @@ def _generate_description(self): ) return description + def _fit_model(self, beta_maps, varcope_maps): + """Fit the model to the data.""" + n_studies, n_voxels = beta_maps.shape + + pymare_dset = pymare.Dataset(y=beta_maps, v=varcope_maps) + est = pymare.estimators.VarianceBasedLikelihoodEstimator(method=self.method) + est.fit_dataset(pymare_dset) + est_summary = est.summary() + fe_stats = est_summary.get_fe_stats() + + z_map = fe_stats["z"].squeeze() + p_map = fe_stats["p"].squeeze() + est_map = fe_stats["est"].squeeze() + se_map = fe_stats["se"].squeeze() + tau2_map = est_summary.tau2.squeeze() + dof_map = np.tile(n_studies - 1, n_voxels).astype(np.int32) + + return z_map, p_map, est_map, se_map, tau2_map, dof_map + def _fit(self, dataset): self.dataset = dataset self.masker = self.masker or dataset.masker @@ -1184,48 +1252,35 @@ def _fit(self, dataset): ) if self.aggressive_mask: - est = pymare.estimators.VarianceBasedLikelihoodEstimator(method=self.method) + voxel_mask = self.inputs_["aggressive_mask"] + result_maps = self._fit_model( + self.inputs_["beta_maps"][:, voxel_mask], + self.inputs_["varcope_maps"][:, voxel_mask], + ) - pymare_dset = pymare.Dataset( - y=self.inputs_["beta_maps"], v=self.inputs_["varcope_maps"] + z_map, p_map, est_map, se_map, tau2_map, dof_map = tuple( + map(lambda x: _boolean_unmask(x, voxel_mask), result_maps) ) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - fe_stats = est_summary.get_fe_stats() - - z_map = _boolean_unmask(fe_stats["z"].squeeze(), self.inputs_["aggressive_mask"]) - p_map = _boolean_unmask(fe_stats["p"].squeeze(), self.inputs_["aggressive_mask"]) - est_map = 
_boolean_unmask(fe_stats["est"].squeeze(), self.inputs_["aggressive_mask"]) - se_map = _boolean_unmask(fe_stats["se"].squeeze(), self.inputs_["aggressive_mask"]) - tau2_map = _boolean_unmask(est_summary.tau2.squeeze(), self.inputs_["aggressive_mask"]) - dof_map = np.tile( - self.inputs_["beta_maps"].shape[0] - 1, self.inputs_["beta_maps"].shape[1] - ).astype(np.int32) - dof_map = _boolean_unmask(dof_map, self.inputs_["aggressive_mask"]) else: - n_total_voxels = self.inputs_["beta_maps"].shape[1] - z_map = np.zeros(n_total_voxels, dtype=float) - p_map = np.zeros(n_total_voxels, dtype=float) - est_map = np.zeros(n_total_voxels, dtype=float) - se_map = np.zeros(n_total_voxels, dtype=float) - tau2_map = np.zeros(n_total_voxels, dtype=float) - dof_map = np.zeros(n_total_voxels, dtype=np.int32) + n_voxels = self.inputs_["beta_maps"].shape[1] + z_map = np.zeros(n_voxels, dtype=float) + p_map = np.zeros(n_voxels, dtype=float) + est_map = np.zeros(n_voxels, dtype=float) + se_map = np.zeros(n_voxels, dtype=float) + tau2_map = np.zeros(n_voxels, dtype=float) + dof_map = np.zeros(n_voxels, dtype=np.int32) + beta_bags = self.inputs_["data_bags"]["beta_maps"] varcope_bags = self.inputs_["data_bags"]["varcope_maps"] for beta_bag, varcope_bag in zip(beta_bags, varcope_bags): - est = pymare.estimators.VarianceBasedLikelihoodEstimator(method=self.method) - - pymare_dset = pymare.Dataset(y=beta_bag["values"], v=varcope_bag["values"]) - est.fit_dataset(pymare_dset) - est_summary = est.summary() - fe_stats = est_summary.get_fe_stats() - - z_map[beta_bag["voxel_mask"]] = fe_stats["z"].squeeze() - p_map[beta_bag["voxel_mask"]] = fe_stats["p"].squeeze() - est_map[beta_bag["voxel_mask"]] = fe_stats["est"].squeeze() - se_map[beta_bag["voxel_mask"]] = fe_stats["se"].squeeze() - tau2_map[beta_bag["voxel_mask"]] = est_summary.tau2.squeeze() - dof_map[beta_bag["voxel_mask"]] = beta_bag["values"].shape[0] - 1 + ( + z_map[beta_bag["voxel_mask"]], + p_map[beta_bag["voxel_mask"]], + 
est_map[beta_bag["voxel_mask"]], + se_map[beta_bag["voxel_mask"]], + tau2_map[beta_bag["voxel_mask"]], + dof_map[beta_bag["voxel_mask"]], + ) = self._fit_model(beta_bag["values"], varcope_bag["values"]) maps = { "z": z_map, @@ -1320,66 +1375,58 @@ def _generate_description(self): ) return description + def _fit_model(self, beta_maps, n_perm=0): + """Fit the model to the data.""" + n_studies, n_voxels = beta_maps.shape + + # Use intercept as explanatory variable + tested_vars = np.ones((n_studies, 1)) + confounding_vars = None + + log_p_map, t_map, _ = permuted_ols( + tested_vars, + beta_maps, + confounding_vars=confounding_vars, + model_intercept=False, # modeled by tested_vars + n_perm=n_perm, + two_sided_test=self.two_sided, + random_state=42, + n_jobs=1, + verbose=0, + ) + + # Convert t to z, preserving signs + dof = n_studies - 1 + + z_map = t_to_z(t_map, dof) + dof_map = np.tile(dof, n_voxels).astype(np.int32) + + return log_p_map.squeeze(), t_map.squeeze(), z_map.squeeze(), dof_map + def _fit(self, dataset): self.dataset = dataset if self.aggressive_mask: - # Use intercept as explanatory variable - self.parameters_["tested_vars"] = np.ones((self.inputs_["beta_maps"].shape[0], 1)) - self.parameters_["confounding_vars"] = None - - _, t_map, _ = permuted_ols( - self.parameters_["tested_vars"], - self.inputs_["beta_maps"], - confounding_vars=self.parameters_["confounding_vars"], - model_intercept=False, # modeled by tested_vars - n_perm=0, - two_sided_test=self.two_sided, - random_state=42, - n_jobs=1, - verbose=0, - ) + voxel_mask = self.inputs_["aggressive_mask"] + result_maps = self._fit_model(self.inputs_["beta_maps"][:, voxel_mask]) - # Convert t to z, preserving signs - dof = ( - self.parameters_["tested_vars"].shape[0] - self.parameters_["tested_vars"].shape[1] + # Skip log_p_map + t_map, z_map, dof_map = tuple( + map(lambda x: _boolean_unmask(x, voxel_mask), result_maps[1:]) ) - z_map = t_to_z(t_map, dof) - - t_map = _boolean_unmask(t_map.squeeze(), 
self.inputs_["aggressive_mask"]) - z_map = _boolean_unmask(z_map.squeeze(), self.inputs_["aggressive_mask"]) - dof_map = np.tile(dof, self.inputs_["beta_maps"].shape[1]).astype(np.int32) - dof_map = _boolean_unmask(dof_map, self.inputs_["aggressive_mask"]) - else: - n_total_voxels = self.inputs_["beta_maps"].shape[1] - t_map = np.zeros(n_total_voxels, dtype=float) - z_map = np.zeros(n_total_voxels, dtype=float) - dof_map = np.zeros(n_total_voxels, dtype=np.int32) - for bag in self.inputs_["data_bags"]["beta_maps"]: - # Use intercept as explanatory variable - bag["tested_vars"] = np.ones((bag["values"].shape[0], 1)) - bag["confounding_vars"] = None - - _, t_map_tmp, _ = permuted_ols( - bag["tested_vars"], - bag["values"], - confounding_vars=bag["confounding_vars"], - model_intercept=False, # modeled by tested_vars - n_perm=0, - two_sided_test=self.two_sided, - random_state=42, - n_jobs=1, - verbose=0, - ) - - # Convert t to z, preserving signs - dof = bag["tested_vars"].shape[0] - bag["tested_vars"].shape[1] - z_map_tmp = t_to_z(t_map_tmp, dof) + n_voxels = self.inputs_["beta_maps"].shape[1] + t_map = np.zeros(n_voxels, dtype=float) + z_map = np.zeros(n_voxels, dtype=float) + dof_map = np.zeros(n_voxels, dtype=np.int32) - t_map[bag["voxel_mask"]] = t_map_tmp.squeeze() - z_map[bag["voxel_mask"]] = z_map_tmp.squeeze() - dof_map[bag["voxel_mask"]] = dof + for bag in self.inputs_["data_bags"]["beta_maps"]: + ( + _, # Skip log_p_map + t_map[bag["voxel_mask"]], + z_map[bag["voxel_mask"]], + dof_map[bag["voxel_mask"]], + ) = self._fit_model(bag["values"]) maps = {"t": t_map, "z": z_map, "dof": dof_map} description = self._generate_description() @@ -1431,16 +1478,9 @@ def correct_fwe_montecarlo(self, result, n_iters=5000, n_cores=1): n_cores = _check_ncores(n_cores) if self.aggressive_mask: - log_p_map, t_map, _ = permuted_ols( - self.parameters_["tested_vars"], - self.inputs_["beta_maps"], - confounding_vars=self.parameters_["confounding_vars"], - model_intercept=False, # 
modeled by tested_vars - n_perm=n_iters, - two_sided_test=self.two_sided, - random_state=42, - n_jobs=n_cores, - verbose=0, + voxel_mask = self.inputs_["aggressive_mask"] + log_p_map, t_map, _, _ = self._fit_model( + self.inputs_["beta_maps"][:, voxel_mask], n_perm=n_iters + ) # Fill complete maps @@ -1451,24 +1491,17 @@ def correct_fwe_montecarlo(self, result, n_iters=5000, n_cores=1): sign[sign == 0] = 1 z_map = p_to_z(p_map, tail="two") * sign - log_p_map = _boolean_unmask(log_p_map.squeeze(), self.inputs_["aggressive_mask"]) - z_map = _boolean_unmask(z_map.squeeze(), self.inputs_["aggressive_mask"]) + log_p_map = _boolean_unmask(log_p_map, voxel_mask) + z_map = _boolean_unmask(z_map, voxel_mask) else: - n_total_voxels = self.inputs_["beta_maps"].shape[1] - log_p_map = np.zeros(n_total_voxels, dtype=float) - z_map = np.zeros(n_total_voxels, dtype=float) + n_voxels = self.inputs_["beta_maps"].shape[1] + log_p_map = np.zeros(n_voxels, dtype=float) + z_map = np.zeros(n_voxels, dtype=float) + for bag in self.inputs_["data_bags"]["beta_maps"]: - log_p_map_tmp, t_map_tmp, _ = permuted_ols( - bag["tested_vars"], - bag["values"], - confounding_vars=bag["confounding_vars"], - model_intercept=False, # modeled by tested_vars - n_perm=n_iters, - two_sided_test=self.two_sided, - random_state=42, - n_jobs=n_cores, - verbose=0, + log_p_map_tmp, t_map_tmp, _, _ = self._fit_model( + bag["values"], n_perm=n_iters ) # Fill complete maps diff --git a/nimare/reports/base.py b/nimare/reports/base.py index 9ff9c2876..a4e6fc139 100644 --- a/nimare/reports/base.py +++ b/nimare/reports/base.py @@ -66,6 +66,7 @@ "alpha": "Alpha", "prior": "Prior", "use_sample_size": "Use sample size for weights", + "normalize_contrast_weights": "Normalize by the number of contrasts", "two_sided": "Two-sided test", "beta": "Parameter estimate", "se": "Standard error of the parameter estimate", @@ -484,12 +485,8 @@ def __init__( ) elif meta_type == "IBMA": # Use "z_maps", for 
Fishers, and Stouffers; otherwise use "beta_maps". - key_maps = ( - "z_maps" - if "z_maps" in self.results.estimator.inputs_["raw_data"] - else "beta_maps" - ) - maps_arr = self.results.estimator.inputs_["raw_data"][key_maps] + key_maps = "z_maps" if "z_maps" in self.results.estimator.inputs_ else "beta_maps" + maps_arr = self.results.estimator.inputs_[key_maps] ids_ = self.results.estimator.inputs_["id"] x_label = "Z" if key_maps == "z_maps" else "Beta" diff --git a/nimare/tests/conftest.py b/nimare/tests/conftest.py index e5a4ad01d..93a539f72 100644 --- a/nimare/tests/conftest.py +++ b/nimare/tests/conftest.py @@ -198,3 +198,32 @@ def example_nimads_annotation(): with open(out_file, "r") as f: annotation = json.load(f) return annotation + + +@pytest.fixture(scope="session") +def testdata_ibma_multiple_contrasts(tmp_path_factory): + """Load data from dataset into global variables.""" + tmpdir = tmp_path_factory.mktemp("testdata_ibma_multiple_contrasts") + + # Load dataset + dset_file = os.path.join(get_test_data_path(), "test_pain_dataset_multiple_contrasts.json") + dset_dir = os.path.join(get_test_data_path(), "test_pain_dataset") + mask_file = os.path.join(dset_dir, "mask.nii.gz") + dset = nimare.dataset.Dataset(dset_file, mask=mask_file) + dset.update_path(dset_dir) + # Move image contents of Dataset to temporary directory + for c in dset.images.columns: + if c.endswith("__relative"): + continue + for f in dset.images[c].values: + if (f is None) or not os.path.isfile(f): + continue + new_f = f.replace( + dset_dir.rstrip(os.path.sep), str(tmpdir.absolute()).rstrip(os.path.sep) + ) + dirname = os.path.dirname(new_f) + if not os.path.isdir(dirname): + os.makedirs(dirname) + copyfile(f, new_f) + dset.update_path(tmpdir) + return dset diff --git a/nimare/tests/data/test_pain_dataset_multiple_contrasts.json b/nimare/tests/data/test_pain_dataset_multiple_contrasts.json new file mode 100644 index 000000000..606280164 --- /dev/null +++ 
b/nimare/tests/data/test_pain_dataset_multiple_contrasts.json @@ -0,0 +1,1242 @@ +{ + "pain.nidm": { + "contrasts": { + "01": { + "images": { + "beta": "pain_01_beta.nii.gz", + "se": "pain_01_se.nii.gz", + "t": "pain_01_t.nii.gz", + "varcope": "pain_01_varcope.nii.gz", + "z": "pain_01_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 25 + ] + } + }, + "02": { + "images": { + "beta": "pain_02_beta.nii.gz", + "se": "pain_02_se.nii.gz", + "t": "pain_02_t.nii.gz", + "varcope": "pain_02_varcope.nii.gz", + "z": "pain_02_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 25 + ] + } + }, + "03": { + "coords": { + "space": "MNI", + "x": [ + -58.0, + 52.0, + 60.0, + 12.0, + 18.0, + 22.0, + 32.0, + 46.0, + -24.0, + -26.0, + -32.0, + -28.0, + 8.0, + 4.0, + -2.0, + 0.0 + ], + "y": [ + 10.0, + 6.0, + 0.0, + 10.0, + -102.0, + -92.0, + -92.0, + -82.0, + -68.0, + -66.0, + -90.0, + -74.0, + 2.0, + -2.0, + 20.0, + -4.0 + ], + "z": [ + 8.0, + 0.0, + 0.0, + 0.0, + -4.0, + -6.0, + -12.0, + -8.0, + -28.0, + -40.0, + -10.0, + -32.0, + 60.0, + 52.0, + 40.0, + 60.0 + ] + }, + "images": { + "beta": "pain_03_beta.nii.gz", + "se": "pain_03_se.nii.gz", + "t": "pain_03_t.nii.gz", + "varcope": "pain_03_varcope.nii.gz", + "z": "pain_03_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 20 + ] + } + }, + "04": { + "coords": { + "space": "MNI", + "x": [ + 54.0, + 34.0, + 54.0, + 34.0, + 52.0, + 56.0, + 36.0, + 52.0, + 32.0, + 20.0, + 20.0, + 14.0, + 8.0, + 4.0, + 2.0, + -8.0, + -36.0, + -24.0, + -34.0, + -28.0, + -40.0, + -32.0, + -30.0, + -38.0 + ], + "y": [ + -28.0, + -20.0, + -22.0, + -18.0, + 4.0, + 10.0, + 2.0, + -8.0, + -98.0, + -92.0, + -100.0, + -104.0, + 4.0, + -6.0, + 10.0, + -8.0, + -90.0, + -68.0, + -88.0, + -98.0, + 4.0, + 18.0, + 16.0, + 16.0 + ], + "z": [ + 18.0, + 14.0, + 16.0, + -10.0, + -2.0, + 8.0, + -2.0, + -2.0, + -14.0, + -14.0, + -2.0, + -6.0, + 58.0, + 66.0, + 50.0, + 62.0, + -14.0, + -36.0, + -20.0, + -20.0, + -14.0, + -20.0, + 2.0, + 10.0 + ] + }, + "images": { + 
"beta": "pain_04_beta.nii.gz", + "se": "pain_04_se.nii.gz", + "t": "pain_04_t.nii.gz", + "varcope": "pain_04_varcope.nii.gz", + "z": "pain_04_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 20 + ] + } + }, + "05": { + "coords": { + "space": "MNI", + "x": [ + 40.0, + -34.0, + 42.0, + 38.0, + -20.0, + -32.0, + 42.0, + -32.0, + -6.0, + -8.0, + 6.0, + -4.0 + ], + "y": [ + 10.0, + 12.0, + 6.0, + 22.0, + -58.0, + -60.0, + -64.0, + -32.0, + 20.0, + -26.0, + 18.0, + 10.0 + ], + "z": [ + -8.0, + -10.0, + -6.0, + 4.0, + -44.0, + -34.0, + -32.0, + -44.0, + 40.0, + 28.0, + 42.0, + 46.0 + ] + }, + "images": { + "beta": "pain_05_beta.nii.gz", + "se": "pain_05_se.nii.gz", + "t": "pain_05_t.nii.gz", + "varcope": "pain_05_varcope.nii.gz", + "z": "pain_05_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 9 + ] + } + }, + "06": { + "coords": { + "space": "MNI", + "x": [ + -10.0, + -2.0, + -34.0, + 26.0 + ], + "y": [ + -28.0, + 6.0, + -4.0, + 44.0 + ], + "z": [ + 2.0, + 56.0, + -8.0, + 36.0 + ] + }, + "images": { + "beta": "pain_06_beta.nii.gz", + "se": "pain_06_se.nii.gz", + "t": "pain_06_t.nii.gz", + "varcope": "pain_06_varcope.nii.gz", + "z": "pain_06_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 9 + ] + } + }, + "07": { + "coords": { + "space": "MNI", + "x": [ + -10.0, + 22.0, + -34.0, + -40.0, + 8.0, + 2.0, + -4.0, + 0.0 + ], + "y": [ + -92.0, + -62.0, + -74.0, + -44.0, + -30.0, + -34.0, + -44.0, + -38.0 + ], + "z": [ + 2.0, + -4.0, + -42.0, + -12.0, + 82.0, + 78.0, + 74.0, + 84.0 + ] + }, + "images": { + "beta": "pain_07_beta.nii.gz", + "se": "pain_07_se.nii.gz", + "t": "pain_07_t.nii.gz", + "varcope": "pain_07_varcope.nii.gz", + "z": "pain_07_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 9 + ] + } + }, + "08": { + "coords": { + "space": "MNI", + "x": [ + 64.0, + 54.0, + 72.0, + 60.0, + 4.0, + 0.0, + -24.0, + 4.0, + 36.0, + 38.0, + 38.0, + 20.0, + -4.0, + 12.0, + 0.0, + 2.0, + -46.0, + -36.0, + -42.0 + ], + "y": [ + -38.0, + -30.0, + -38.0, + -54.0, + 
-26.0, + 14.0, + -8.0, + -36.0, + 44.0, + 56.0, + 38.0, + -44.0, + -70.0, + -68.0, + -46.0, + -86.0, + -56.0, + -60.0, + -44.0 + ], + "z": [ + 22.0, + 20.0, + 28.0, + 2.0, + 24.0, + 30.0, + 6.0, + -16.0, + 24.0, + 20.0, + 12.0, + 62.0, + 50.0, + 46.0, + 56.0, + -40.0, + -54.0, + -40.0, + -56.0 + ] + }, + "images": { + "beta": "pain_08_beta.nii.gz", + "se": "pain_08_se.nii.gz", + "t": "pain_08_t.nii.gz", + "varcope": "pain_08_varcope.nii.gz", + "z": "pain_08_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 12 + ] + } + }, + "09": { + "coords": { + "space": "MNI", + "x": [ + 60.0, + 68.0, + 66.0, + 54.0, + 8.0, + 2.0, + 46.0, + -8.0, + 36.0, + 30.0, + 24.0, + 52.0, + 14.0, + 38.0, + 30.0, + 36.0, + -28.0, + -20.0, + -46.0, + -50.0 + ], + "y": [ + -60.0, + -48.0, + -44.0, + -50.0, + -26.0, + -52.0, + -50.0, + -34.0, + 44.0, + 26.0, + 36.0, + 48.0, + -76.0, + -58.0, + -62.0, + -50.0, + -56.0, + -74.0, + -58.0, + -54.0 + ], + "z": [ + -16.0, + -14.0, + -4.0, + -14.0, + 18.0, + 50.0, + 54.0, + 20.0, + 34.0, + 12.0, + 16.0, + 24.0, + -42.0, + -44.0, + -38.0, + -40.0, + -46.0, + -40.0, + -54.0, + -44.0 + ] + }, + "images": { + "beta": "pain_09_beta.nii.gz", + "se": "pain_09_se.nii.gz", + "t": "pain_09_t.nii.gz", + "varcope": "pain_09_varcope.nii.gz", + "z": "pain_09_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 12 + ] + } + }, + "10": { + "coords": { + "space": "MNI", + "x": [ + 68.0, + 52.0, + 60.0, + 52.0, + 46.0, + 40.0, + 44.0, + 36.0, + -38.0, + -30.0, + 40.0, + 26.0, + -4.0, + 2.0, + -12.0, + -64.0, + 22.0, + -10.0, + -2.0, + -6.0 + ], + "y": [ + -36.0, + -30.0, + -38.0, + -40.0, + -2.0, + 8.0, + 4.0, + 16.0, + -54.0, + -60.0, + -36.0, + -32.0, + -18.0, + 18.0, + -24.0, + -24.0, + -40.0, + -66.0, + -38.0, + -68.0 + ], + "z": [ + 36.0, + 20.0, + 32.0, + 42.0, + -10.0, + 2.0, + 12.0, + 8.0, + -40.0, + -46.0, + -44.0, + -38.0, + 0.0, + 28.0, + -14.0, + 30.0, + 38.0, + 50.0, + 84.0, + 52.0 + ] + }, + "images": { + "beta": "pain_10_beta.nii.gz", + "se": 
"pain_10_se.nii.gz", + "t": "pain_10_t.nii.gz", + "varcope": "pain_10_varcope.nii.gz", + "z": "pain_10_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 12 + ] + } + } + } + }, + "pain_11.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + 8.0, + -36.0, + -6.0, + 10.0 + ], + "y": [ + -22.0, + -20.0, + -48.0, + -12.0 + ], + "z": [ + 8.0, + 12.0, + 54.0, + 8.0 + ] + }, + "images": { + "beta": "pain_11_beta.nii.gz", + "se": "pain_11_se.nii.gz", + "t": "pain_11_t.nii.gz", + "varcope": "pain_11_varcope.nii.gz", + "z": "pain_11_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 12 + ] + } + } + } + }, + "pain_12.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + -36.0, + 38.0, + 38.0, + 56.0, + 4.0, + 12.0, + 2.0, + 10.0, + -34.0, + -28.0, + -30.0, + -30.0 + ], + "y": [ + 14.0, + 2.0, + 4.0, + -24.0, + -70.0, + -68.0, + -74.0, + -72.0, + 46.0, + 38.0, + 40.0, + 42.0 + ], + "z": [ + 2.0, + 2.0, + -16.0, + 24.0, + 34.0, + 34.0, + 48.0, + 52.0, + 30.0, + 26.0, + 16.0, + 38.0 + ] + }, + "images": { + "beta": "pain_12_beta.nii.gz", + "se": "pain_12_se.nii.gz", + "t": "pain_12_t.nii.gz", + "varcope": "pain_12_varcope.nii.gz", + "z": "pain_12_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 13 + ] + } + } + } + }, + "pain_13.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + 38.0, + 36.0, + 38.0, + -38.0, + -64.0, + -66.0, + -58.0, + -64.0 + ], + "y": [ + 16.0, + 2.0, + -10.0, + 16.0, + -20.0, + -20.0, + -30.0, + -26.0 + ], + "z": [ + -6.0, + 4.0, + -4.0, + -2.0, + 20.0, + 28.0, + 34.0, + 40.0 + ] + }, + "images": { + "beta": "pain_13_beta.nii.gz", + "se": "pain_13_se.nii.gz", + "t": "pain_13_t.nii.gz", + "varcope": "pain_13_varcope.nii.gz", + "z": "pain_13_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 32 + ] + } + } + } + }, + "pain_14.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + 32.0, + 22.0, + 38.0, + -34.0, + 28.0, + 26.0, + 16.0, + 16.0 + ], + 
"y": [ + 22.0, + -102.0, + 18.0, + -58.0, + -56.0, + -46.0, + -72.0, + -62.0 + ], + "z": [ + -2.0, + -10.0, + -8.0, + -34.0, + -66.0, + -62.0, + -62.0, + -64.0 + ] + }, + "images": { + "beta": "pain_14_beta.nii.gz", + "se": "pain_14_se.nii.gz", + "t": "pain_14_t.nii.gz", + "varcope": "pain_14_varcope.nii.gz", + "z": "pain_14_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 24 + ] + } + } + } + }, + "pain_15.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + -62.0, + 0.0, + 38.0, + -46.0 + ], + "y": [ + -22.0, + 6.0, + -8.0, + 4.0 + ], + "z": [ + 18.0, + 44.0, + -6.0, + 28.0 + ] + }, + "images": { + "beta": "pain_15_beta.nii.gz", + "se": "pain_15_se.nii.gz", + "t": "pain_15_t.nii.gz", + "varcope": "pain_15_varcope.nii.gz", + "z": "pain_15_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 14 + ] + } + } + } + }, + "pain_16.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + 34.0, + 32.0, + 58.0, + 50.0, + 30.0, + 50.0, + 28.0, + 46.0, + -32.0, + -6.0, + -54.0, + -48.0, + -2.0, + -8.0, + -20.0, + 8.0, + -60.0, + -54.0, + -46.0, + -42.0 + ], + "y": [ + 18.0, + 22.0, + -46.0, + -24.0, + -54.0, + -50.0, + -48.0, + -54.0, + 14.0, + 10.0, + -28.0, + -34.0, + -54.0, + -46.0, + -46.0, + -68.0, + -62.0, + -48.0, + -56.0, + -48.0 + ], + "z": [ + -12.0, + -2.0, + 14.0, + 14.0, + -42.0, + -44.0, + -58.0, + -40.0, + -2.0, + 44.0, + 14.0, + 20.0, + 56.0, + 52.0, + 66.0, + 36.0, + 4.0, + 8.0, + 4.0, + -26.0 + ] + }, + "images": { + "beta": "pain_16_beta.nii.gz", + "se": "pain_16_se.nii.gz", + "t": "pain_16_t.nii.gz", + "varcope": "pain_16_varcope.nii.gz", + "z": "pain_16_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 14 + ] + } + } + } + }, + "pain_17.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + -60.0, + -36.0, + -52.0, + -54.0, + -2.0, + 0.0, + 0.0, + 0.0, + -42.0, + -10.0, + -50.0, + -32.0, + -48.0, + -52.0, + -44.0, + -58.0 + ], + "y": [ + -28.0, + -18.0, + -28.0, + -10.0, + -58.0, 
+ -54.0, + -84.0, + -64.0, + -60.0, + -66.0, + -56.0, + -60.0, + -70.0, + -64.0, + -62.0, + -56.0 + ], + "z": [ + 20.0, + 4.0, + 22.0, + 4.0, + -18.0, + -10.0, + 6.0, + -8.0, + -36.0, + -28.0, + -20.0, + -26.0, + -4.0, + 4.0, + 4.0, + 8.0 + ] + }, + "images": { + "beta": "pain_17_beta.nii.gz", + "se": "pain_17_se.nii.gz", + "t": "pain_17_t.nii.gz", + "varcope": "pain_17_varcope.nii.gz", + "z": "pain_17_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 12 + ] + } + } + } + }, + "pain_18.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + 58.0, + 38.0, + 36.0, + 52.0, + -56.0, + -56.0, + -36.0, + -36.0 + ], + "y": [ + -30.0, + 2.0, + 16.0, + -44.0, + -32.0, + -22.0, + -2.0, + -24.0 + ], + "z": [ + 18.0, + 2.0, + 0.0, + 8.0, + 20.0, + 24.0, + 6.0, + 10.0 + ] + }, + "images": { + "beta": "pain_18_beta.nii.gz", + "se": "pain_18_se.nii.gz", + "t": "pain_18_t.nii.gz", + "varcope": "pain_18_varcope.nii.gz", + "z": "pain_18_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 12 + ] + } + } + } + }, + "pain_19.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + 40.0, + 42.0, + -52.0, + 34.0, + 40.0, + 30.0, + 20.0, + 38.0, + -24.0, + -28.0, + -14.0, + 20.0, + -2.0, + -6.0, + 4.0, + -8.0, + -42.0, + -38.0, + -42.0, + -50.0 + ], + "y": [ + 12.0, + 2.0, + -8.0, + 22.0, + 68.0, + 70.0, + 72.0, + 70.0, + -84.0, + -60.0, + -90.0, + -80.0, + 0.0, + 10.0, + 2.0, + 14.0, + 58.0, + 64.0, + 54.0, + 54.0 + ], + "z": [ + -6.0, + -4.0, + 6.0, + 2.0, + -6.0, + -10.0, + 18.0, + 6.0, + -26.0, + -34.0, + -28.0, + -28.0, + 48.0, + 38.0, + 50.0, + 34.0, + 0.0, + -6.0, + 12.0, + -10.0 + ] + }, + "images": { + "beta": "pain_19_beta.nii.gz", + "se": "pain_19_se.nii.gz", + "t": "pain_19_t.nii.gz", + "varcope": "pain_19_varcope.nii.gz", + "z": "pain_19_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 16 + ] + } + } + } + }, + "pain_20.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + 40.0, + -54.0, + 36.0, + 
-36.0, + 2.0, + 4.0, + 6.0, + 10.0 + ], + "y": [ + 6.0, + -68.0, + 8.0, + -78.0, + -4.0, + 8.0, + 8.0, + -24.0 + ], + "z": [ + -8.0, + 0.0, + 2.0, + -20.0, + 64.0, + 52.0, + 34.0, + 40.0 + ] + }, + "images": { + "beta": "pain_20_beta.nii.gz", + "se": "pain_20_se.nii.gz", + "t": "pain_20_t.nii.gz", + "z": "pain_20_z.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 16 + ] + } + } + } + }, + "pain_21.nidm": { + "contrasts": { + "1": { + "coords": { + "space": "MNI", + "x": [ + 40.0, + 34.0, + 30.0, + -26.0, + 4.0, + -8.0, + 0.0, + -8.0, + -34.0, + -56.0, + -34.0, + -58.0 + ], + "y": [ + -72.0, + 10.0, + -78.0, + -86.0, + 12.0, + 14.0, + 2.0, + 6.0, + 14.0, + -34.0, + 8.0, + -44.0 + ], + "z": [ + -20.0, + 0.0, + -22.0, + -16.0, + 36.0, + 34.0, + 54.0, + 40.0, + 2.0, + 24.0, + -16.0, + 20.0 + ] + }, + "images": { + "beta": "pain_21_beta.nii.gz", + "se": "pain_21_se.nii.gz", + "t": "pain_21_t.nii.gz", + "varcope": "pain_21_varcope.nii.gz" + }, + "metadata": { + "sample_sizes": [ + 16 + ] + } + } + } + } +} diff --git a/nimare/tests/test_meta_ibma.py b/nimare/tests/test_meta_ibma.py index 0a245d759..8f41d630a 100644 --- a/nimare/tests/test_meta_ibma.py +++ b/nimare/tests/test_meta_ibma.py @@ -26,7 +26,7 @@ ), pytest.param( ibma.Stouffers, - {"use_sample_size": False}, + {"use_sample_size": False, "normalize_contrast_weights": False}, None, {}, ("z", "p", "dof"), @@ -34,11 +34,27 @@ ), pytest.param( ibma.Stouffers, - {"use_sample_size": True}, + {"use_sample_size": True, "normalize_contrast_weights": False}, None, {}, ("z", "p", "dof"), - id="Stouffers_weighted", + id="Stouffers_sample_weighted", + ), + pytest.param( + ibma.Stouffers, + {"use_sample_size": False, "normalize_contrast_weights": True}, + None, + {}, + ("z", "p", "dof"), + id="Stouffers_contrast_weighted", + ), + pytest.param( + ibma.Stouffers, + {"use_sample_size": True, "normalize_contrast_weights": True}, + None, + {}, + ("z", "p", "dof"), + id="Stouffers_sample_contrast_weighted", ), pytest.param( 
ibma.WeightedLeastSquares, @@ -201,3 +217,15 @@ def test_ibma_resampling(testdata_ibma_resample, resample_kwargs): results = meta.fit(testdata_ibma_resample) assert isinstance(results, nimare.results.MetaResult) + + +def test_stouffers_multiple_contrasts(testdata_ibma_multiple_contrasts): + """Test Stouffer's correction with multiple contrasts.""" + meta = ibma.Stouffers() + results = meta.fit(testdata_ibma_multiple_contrasts) + + assert isinstance(results, nimare.results.MetaResult) + assert results.get_map("z", return_type="array").ndim == 1 + z_img = results.get_map("z") + assert z_img.ndim == 3 + assert z_img.shape == (10, 10, 10)