From eb40d0825deff4f9cccc8875fecf08093d198be6 Mon Sep 17 00:00:00 2001 From: mtnhuck Date: Fri, 12 Jan 2018 17:09:13 -0500 Subject: [PATCH 01/19] Updating data definitions for Siemens BIDS2NDA conversion. scanner_software_versions_pd: taken from SoftwareVersions; edited at line 165: dict_append(image03_dict, 'scanner_software_versions_pd', metadata.get("HardcopyDeviceSoftwareVersion", "")) to dict_append(image03_dict, 'scanner_software_versions_pd', metadata.get("SoftwareVersions", "")) image_slice_thickness: taken from SliceThickness; inserted at line 170: dict_append(image03_dict, 'image_slice_thickness', metadata.get("SliceThickness", "")) --- bids2nda/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index c83b787..c565581 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -162,11 +162,12 @@ def run(args): dict_append(image03_dict, 'image_modality', "MRI") dict_append(image03_dict, 'scanner_manufacturer_pd', metadata.get("Manufacturer", "")) dict_append(image03_dict, 'scanner_type_pd', metadata.get("ManufacturersModelName", "")) - dict_append(image03_dict, 'scanner_software_versions_pd', metadata.get("HardcopyDeviceSoftwareVersion", "")) + dict_append(image03_dict, 'scanner_software_versions_pd', metadata.get("SoftwareVersions", "")) dict_append(image03_dict, 'magnetic_field_strength', metadata.get("MagneticFieldStrength", "")) dict_append(image03_dict, 'mri_echo_time_pd', metadata.get("EchoTime", "")) dict_append(image03_dict, 'flip_angle', metadata.get("FlipAngle", "")) dict_append(image03_dict, 'receive_coil', metadata.get("ReceiveCoilName", "")) + dict_append(image03_dict, 'image_slice_thickness', metadata.get("SliceThickness", "")) dict_append(image03_dict, 'transformation_performed', 'Yes') dict_append(image03_dict, 'transformation_type', 'BIDS2NDA') From 7b5737e70f37fc90ba9284ceadbd73995bcc880a Mon Sep 17 00:00:00 2001 From: mtnhuck Date: Fri, 12 Jan 2018 17:34:24 -0500 Subject: [PATCH 02/19] Data dictionary for image_orientation. Edited to include image_orientation. image_orientation: adapted from https://stackoverflow.com/questions/34782409/understanding-dicom-image-attributes-to-get-axial-coronal-sagittal-cuts and https://github.com/rordenlab/dcm2niix/issues/153#event-1421631442 photomet_interpret: from the DICOM/JSON field PhotometricInterpretation --- bids2nda/main.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index c565581..333dec7 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -167,7 +167,17 @@ def run(args): dict_append(image03_dict, 'mri_echo_time_pd', metadata.get("EchoTime", "")) dict_append(image03_dict, 'flip_angle', metadata.get("FlipAngle", "")) dict_append(image03_dict, 'receive_coil', metadata.get("ReceiveCoilName", "")) - dict_append(image03_dict, 'image_slice_thickness', metadata.get("SliceThickness", "")) + dict_append(image03_dict, 'image_slice_thickness', metadata.get("SliceThickness", "")) + dict_append(image03_dict, 'photomet_interpret', metadata.get("PhotometricInterpretation", "")) + + plane = metadata.get("ImageOrientationPatient") + plane=round(plane) + if plane[0] == 1: + dict_append(image03_dict, 'image_orientation.', "Sagittal") + elif plane[1] == 1: + dict_append(image03_dict, 'image_orientation.', "Coronal") + elif plane[2] == 1: + dict_append(image03_dict, 'image_orientation.', "Axial") dict_append(image03_dict, 'transformation_performed', 'Yes') dict_append(image03_dict, 'transformation_type', 'BIDS2NDA') From 
4fa8695aa7010ac7832beccb546ae2fb8667e910 Mon Sep 17 00:00:00 2001 From: mtnhuck Date: Fri, 12 Jan 2018 17:54:07 -0500 Subject: [PATCH 03/19] Slightly modified readme.md Added a notes section talking about required experiment_id field --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 069e0fb..e9b180d 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ Extract NIHM Data Archive compatible metadata from Brain Imaging Data Structure optional arguments: -h, --help show this help message and exit - + ## GUID_MAPPING file format The is the file format produced by the GUID Tool: one line per subject in the format @@ -32,3 +32,7 @@ The is the file format produced by the GUID Tool: one line per subject in the fo ## Example outputs See [/examples](/examples) + +## Notes: +Column experiment_id must be manually filled in for now. +This is based on experiment ID's received from NDA after setting the study up through the NDA website [here](https://ndar.nih.gov/user/dashboard/collections.html). From e19b5edd61f13cc1beab868b6dfd47f427a68c67 Mon Sep 17 00:00:00 2001 From: mtnhuck Date: Sat, 13 Jan 2018 16:34:25 -0500 Subject: [PATCH 04/19] Edits to try to fix image_orientation --- bids2nda/main.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 333dec7..41d4cd2 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -15,6 +15,7 @@ import nibabel as nb import json import pandas as pd +import numpy as np # Gather our code in a main() function @@ -171,13 +172,8 @@ def run(args): dict_append(image03_dict, 'photomet_interpret', metadata.get("PhotometricInterpretation", "")) plane = metadata.get("ImageOrientationPatient") - plane=round(plane) - if plane[0] == 1: - dict_append(image03_dict, 'image_orientation.', "Sagittal") - elif plane[1] == 1: - dict_append(image03_dict, 'image_orientation.', "Coronal") - elif plane[2] == 1: - dict_append(image03_dict, 'image_orientation.', "Axial") + get_orientation = lambda place: ['Axial','Coronal','Sagittal'][np.argmax(plane[:3])] + dict_append(image03_dict, 'image_orientation.',get_orientation(plane)) dict_append(image03_dict, 'transformation_performed', 'Yes') dict_append(image03_dict, 'transformation_type', 'BIDS2NDA') From ef32caf4e3efa069ce433e64b74e19460e975f74 Mon Sep 17 00:00:00 2001 From: mtnhuck Date: Sun, 14 Jan 2018 00:04:57 -0500 Subject: [PATCH 05/19] Update references Updated for SliceThickness and PhotometricInterpretation --- bids2nda/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 41d4cd2..3c27d10 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -168,8 +168,8 @@ def run(args): dict_append(image03_dict, 'mri_echo_time_pd', metadata.get("EchoTime", "")) dict_append(image03_dict, 'flip_angle', metadata.get("FlipAngle", "")) dict_append(image03_dict, 'receive_coil', metadata.get("ReceiveCoilName", "")) - dict_append(image03_dict, 'image_slice_thickness', metadata.get("SliceThickness", "")) - dict_append(image03_dict, 'photomet_interpret', metadata.get("PhotometricInterpretation", "")) + dict_append(image03_dict, 'image_slice_thickness', metadata.get("global.const.SliceThickness", "")) + dict_append(image03_dict, 'photomet_interpret', metadata.get("global.const.PhotometricInterpretation", "")) plane = metadata.get("ImageOrientationPatient") get_orientation = lambda place: ['Axial','Coronal','Sagittal'][np.argmax(plane[:3])] From 
80ee7fff407281a67bbb0daf88821d4a555566fd Mon Sep 17 00:00:00 2001 From: mtnhuck Date: Mon, 15 Jan 2018 10:51:57 -0500 Subject: [PATCH 06/19] Finally got it to pull PhotometricInterpretation but doesn't work for fieldmaps --- bids2nda/main.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 3c27d10..c37e46b 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -168,10 +168,9 @@ def run(args): dict_append(image03_dict, 'mri_echo_time_pd', metadata.get("EchoTime", "")) dict_append(image03_dict, 'flip_angle', metadata.get("FlipAngle", "")) dict_append(image03_dict, 'receive_coil', metadata.get("ReceiveCoilName", "")) - dict_append(image03_dict, 'image_slice_thickness', metadata.get("global.const.SliceThickness", "")) - dict_append(image03_dict, 'photomet_interpret', metadata.get("global.const.PhotometricInterpretation", "")) - plane = metadata.get("ImageOrientationPatient") + + plane = metadata.get("ImageOrientationPatient","") get_orientation = lambda place: ['Axial','Coronal','Sagittal'][np.argmax(plane[:3])] dict_append(image03_dict, 'image_orientation.',get_orientation(plane)) dict_append(image03_dict, 'transformation_performed', 'Yes') @@ -201,6 +200,8 @@ def run(args): dict_append(image03_dict, 'image_resolution1', nii.header.get_zooms()[0]) dict_append(image03_dict, 'image_resolution2', nii.header.get_zooms()[1]) dict_append(image03_dict, 'image_resolution3', nii.header.get_zooms()[2]) + dict_append(image03_dict, 'image_slice_thickness', nii.header.get_zooms()[2]) + dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation"),"")) if len(nii.shape) > 3: image_resolution4 = nii.header.get_zooms()[3] else: From e4eb087c9f35404a88ece846de28a8b79d2706fb Mon Sep 17 00:00:00 2001 From: leej3 Date: Thu, 1 Feb 2018 18:17:14 -0500 Subject: [PATCH 07/19] fix photomet and image_orientation additions --- bids2nda/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index c37e46b..d732942 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -172,7 +172,7 @@ def run(args): plane = metadata.get("ImageOrientationPatient","") get_orientation = lambda place: ['Axial','Coronal','Sagittal'][np.argmax(plane[:3])] - dict_append(image03_dict, 'image_orientation.',get_orientation(plane)) + dict_append(image03_dict, 'image_orientation',get_orientation(plane)) dict_append(image03_dict, 'transformation_performed', 'Yes') dict_append(image03_dict, 'transformation_type', 'BIDS2NDA') @@ -201,7 +201,7 @@ def run(args): dict_append(image03_dict, 'image_resolution2', nii.header.get_zooms()[1]) dict_append(image03_dict, 'image_resolution3', nii.header.get_zooms()[2]) dict_append(image03_dict, 'image_slice_thickness', nii.header.get_zooms()[2]) - dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation"),"")) + dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation","")) if len(nii.shape) > 3: image_resolution4 = nii.header.get_zooms()[3] else: From 0007c8f73a481a16bcc43a32334f393419ec982b Mon Sep 17 00:00:00 2001 From: leej3 Date: Sat, 2 Jun 2018 11:47:49 -0400 Subject: [PATCH 08/19] make interview_age extraction more robust --- bids2nda/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index d732942..44e8bbb 100644 --- a/bids2nda/main.py 
+++ b/bids2nda/main.py @@ -109,6 +109,7 @@ def run(args): "msec": "Milliseconds"} participants_df = pd.read_csv(os.path.join(args.bids_directory, "participants.tsv"), header=0, sep="\t") + participants_df['age'] = participants_df.age.astype(str).str.rstrip('Y').str.lstrip('0') image03_dict = OrderedDict() for file in glob(os.path.join(args.bids_directory, "sub-*", "*", "sub-*.nii.gz")) + \ @@ -141,7 +142,7 @@ def run(args): ndar_date = sdate[1] + "/" + sdate[2].split("T")[0] + "/" + sdate[0] dict_append(image03_dict, 'interview_date', ndar_date) - interview_age = int(round(list(participants_df[participants_df.participant_id == "sub-" + sub].age)[0], 0))*12 + interview_age = int(round(float(participants_df[participants_df.participant_id == "sub-" + sub].age.values[0]), 0)*12) dict_append(image03_dict, 'interview_age', interview_age) sex = list(participants_df[participants_df.participant_id == "sub-" + sub].sex)[0] From cb15ac18bb4c655dd9d0e988bdae7c564d3b5a61 Mon Sep 17 00:00:00 2001 From: leej3 Date: Sat, 2 Jun 2018 12:31:05 -0400 Subject: [PATCH 09/19] fix bug in slice-timing and TR extraction There was a redundant if/else confusing things. I removed it. Also, a value always has to be assigned to each element of image03_dict, otherwise the conversion to a dataframe fails. --- bids2nda/main.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 44e8bbb..d272dff 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -203,28 +203,29 @@ def run(args): dict_append(image03_dict, 'image_resolution3', nii.header.get_zooms()[2]) dict_append(image03_dict, 'image_slice_thickness', nii.header.get_zooms()[2]) dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation","")) - if len(nii.shape) > 3: - image_resolution4 = nii.header.get_zooms()[3] - else: - image_resolution4 = "" - dict_append(image03_dict, 'image_resolution4', image_resolution4) - dict_append(image03_dict, 'image_unit1', units_dict[nii.header.get_xyzt_units()[0]]) - dict_append(image03_dict, 'image_unit2', units_dict[nii.header.get_xyzt_units()[0]]) - dict_append(image03_dict, 'image_unit3', units_dict[nii.header.get_xyzt_units()[0]]) if len(nii.shape) > 3: + image_resolution4 = nii.header.get_zooms()[3] image_unit4 = units_dict[nii.header.get_xyzt_units()[1]] if image_unit4 == "Milliseconds": TR = nii.header.get_zooms()[3]/1000.
else: TR = nii.header.get_zooms()[3] - dict_append(image03_dict, 'mri_repetition_time_pd', TR) else: + image_resolution4 = "" image_unit4 = "" - dict_append(image03_dict, 'mri_repetition_time_pd', metadata.get("RepetitionTime", "")) + TR = metadata.get("RepetitionTime", "") + + slice_timing = metadata.get("SliceTiming", "") - dict_append(image03_dict, 'slice_timing', metadata.get("SliceTiming", "")) + + dict_append(image03_dict, 'slice_timing', slice_timing) dict_append(image03_dict, 'image_unit4', image_unit4) + dict_append(image03_dict, 'mri_repetition_time_pd', TR) + dict_append(image03_dict, 'image_resolution4', image_resolution4) + dict_append(image03_dict, 'image_unit1', units_dict[nii.header.get_xyzt_units()[0]]) + dict_append(image03_dict, 'image_unit2', units_dict[nii.header.get_xyzt_units()[0]]) + dict_append(image03_dict, 'image_unit3', units_dict[nii.header.get_xyzt_units()[0]]) dict_append(image03_dict, 'mri_field_of_view_pd', "%g x %g %s" % (nii.header.get_zooms()[0], nii.header.get_zooms()[1], @@ -290,6 +291,11 @@ def run(args): dict_append(image03_dict, 'bvalfile', "") dict_append(image03_dict, 'bvek_bval_files', "") + # all values of image03_dict should be the same length. + # Fail when this is not true instead of when the dataframe + # is created. + assert(len(set(map(len,image03_dict.values()))) ==1) + image03_df = pd.DataFrame(image03_dict) with open(os.path.join(args.output_directory, "image03.txt"), "w") as out_fp: From c7c5de650064ef50b5ac95a4147f82b8ab8aa348 Mon Sep 17 00:00:00 2001 From: leej3 Date: Sat, 2 Jun 2018 15:36:19 -0400 Subject: [PATCH 10/19] further merging of if/else for 3d vs higher-d scans --- bids2nda/main.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index d272dff..b3f2719 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -182,12 +182,6 @@ def run(args): dict_append(image03_dict, 'image_extent1', nii.shape[0]) dict_append(image03_dict, 'image_extent2', nii.shape[1]) dict_append(image03_dict, 'image_extent3', nii.shape[2]) - if len(nii.shape) > 3: - image_extent4 = nii.shape[3] - else: - image_extent4 = "" - - dict_append(image03_dict, 'image_extent4', image_extent4) if suffix == "bold": extent4_type = "time" elif suffix == "dwi": @@ -205,6 +199,7 @@ def run(args): dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation","")) if len(nii.shape) > 3: + image_extent4 = nii.shape[3] image_resolution4 = nii.header.get_zooms()[3] image_unit4 = units_dict[nii.header.get_xyzt_units()[1]] if image_unit4 == "Milliseconds": @@ -214,11 +209,13 @@ def run(args): else: image_resolution4 = "" image_unit4 = "" + image_extent4 = "" TR = metadata.get("RepetitionTime", "") slice_timing = metadata.get("SliceTiming", "") + dict_append(image03_dict, 'image_extent4', image_extent4) dict_append(image03_dict, 'slice_timing', slice_timing) dict_append(image03_dict, 'image_unit4', image_unit4) dict_append(image03_dict, 'mri_repetition_time_pd', TR) From e3ec39e57e859b379d0f03c82720ff8d91658631 Mon Sep 17 00:00:00 2001 From: leej3 Date: Sat, 2 Jun 2018 15:37:33 -0400 Subject: [PATCH 11/19] add arg for experiment id. I think its the id listed for collection on the NDA site. 
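As background for the length-consistency assert introduced in PATCH 09 above: pandas will not build a DataFrame from a dict whose list values have different lengths, so checking the lengths up front makes the conversion fail at the offending scan rather than at the very end of the run. A minimal sketch, using hypothetical column names rather than the real image03 fields:

import pandas as pd

# Hypothetical dict-of-lists standing in for image03_dict; one column is an entry short.
image03_like = {"subjectkey": ["A", "B", "C"], "flip_angle": [90, 90]}

# The up-front check added in PATCH 09: every column list must have the same length.
if len(set(map(len, image03_like.values()))) != 1:
    print("unequal column lengths:", {k: len(v) for k, v in image03_like.items()})

# Without that check the problem only surfaces at the final step, as a ValueError:
try:
    pd.DataFrame(image03_like)
except ValueError as err:
    print(err)  # e.g. "All arrays must be of the same length"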
--- bids2nda/main.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index b3f2719..808bf04 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -153,7 +153,7 @@ def run(args): suffix = file.split("_")[-1].split(".")[0] if suffix == "bold": description = suffix + " " + metadata["TaskName"] - dict_append(image03_dict, 'experiment_id', metadata.get("ExperimentID", "")) + dict_append(image03_dict, 'experiment_id', metadata.get("ExperimentID", args.experiment_id)) else: description = suffix dict_append(image03_dict, 'experiment_id', '') @@ -314,6 +314,9 @@ def error(self, message): "bids_directory", help="Location of the root of your BIDS compatible directory", metavar="BIDS_DIRECTORY") + parser.add_argument('-e', '--experiment_id', default=None, + help = ("Functional scans require an experiment_id. If ExperimentID is not" + " found in the scan metadata this value is used")) parser.add_argument( "guid_mapping", help="Path to a text file with participant_id to GUID mapping. You will need to use the " From b3efa64af9ed9b85f189eb557cd91483e21a3427 Mon Sep 17 00:00:00 2001 From: leej3 Date: Sat, 2 Jun 2018 15:38:03 -0400 Subject: [PATCH 12/19] update available scan types --- bids2nda/main.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 808bf04..ac96b25 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -86,8 +86,11 @@ def run(args): "PD": "MR structural (PD)", #"MR structural(FSPGR)", "T2w": "MR structural (T2)", + "T2map": "MR structural (T2)", + "T2star": "MR: T2star", + "FLAIR": "MR: FLAIR", + "asl": "ASL", #PET; - #ASL; #microscopy; #MR structural(PD, T2); #MR structural(B0 map); From f0a1cf6143af54a983298d24932f3efde566cbd5 Mon Sep 17 00:00:00 2001 From: leej3 Date: Sat, 2 Jun 2018 15:38:34 -0400 Subject: [PATCH 13/19] do not overwrite existing zip files by default. 
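The change in PATCH 13, whose diff follows, boils down to a guard around the metadata-zip writing. A standalone sketch of the idea, using a hypothetical helper rather than the actual code path in main.py:

import os
import zipfile

def maybe_write_zip(zip_path, member_name, member_text, overwrite=False):
    # Reuse an archive left over from a previous run unless overwriting was requested.
    if os.path.exists(zip_path) and not overwrite:
        return zip_path
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
        zf.writestr(member_name, member_text)
    return zip_path

In the patch itself the same pattern is written inline, driven by the zip_path_exists check and the new --overwrite_zips argument.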
--- bids2nda/main.py | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index ac96b25..3050bbc 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -242,21 +242,24 @@ def run(args): if len(metadata) > 0 or suffix in ['bold', 'dwi']: _, fname = os.path.split(file) zip_name = fname.split(".")[0] + ".metadata.zip" - with zipfile.ZipFile(os.path.join(args.output_directory, zip_name), 'w', zipfile.ZIP_DEFLATED) as zipf: - - zipf.writestr(fname.replace(".nii.gz", ".json"), json.dumps(metadata, indent=4, sort_keys=True)) - if suffix == "bold": - #TODO write a more robust function for finding those files - events_file = file.split("_bold")[0] + "_events.tsv" - arch_name = os.path.split(events_file)[1] - if not os.path.exists(events_file): - task_name = file.split("_task-")[1].split("_")[0] - events_file = os.path.join(args.bids_directory, "task-" + task_name + "_events.tsv") - - if os.path.exists(events_file): - zipf.write(events_file, arch_name) - - dict_append(image03_dict, 'data_file2', os.path.join(args.output_directory, zip_name)) + zip_path = os.path.join(args.output_directory, zip_name) + zip_path_exists = os.path.exists(zip_path) + if not zip_path_exists or (zip_path_exists and args.overwrite_zips): + with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: + + zipf.writestr(fname.replace(".nii.gz", ".json"), json.dumps(metadata, indent=4, sort_keys=True)) + if suffix == "bold": + #TODO write a more robust function for finding those files + events_file = file.split("_bold")[0] + "_events.tsv" + arch_name = os.path.split(events_file)[1] + if not os.path.exists(events_file): + task_name = file.split("_task-")[1].split("_")[0] + events_file = os.path.join(args.bids_directory, "task-" + task_name + "_events.tsv") + + if os.path.exists(events_file): + zipf.write(events_file, arch_name) + + dict_append(image03_dict, 'data_file2', zip_path) dict_append(image03_dict, 'data_file2_type', "ZIP file with additional metadata from Brain Imaging " "Data Structure (http://bids.neuroimaging.io)") else: @@ -320,6 +323,9 @@ def error(self, message): parser.add_argument('-e', '--experiment_id', default=None, help = ("Functional scans require an experiment_id. If ExperimentID is not" " found in the scan metadata this value is used")) + parser.add_argument('-o', '--overwrite_zips', action='store_true', + help = ("If a conversion has already been performed, the default is " + "to avoid rewriting each zip file generated and instead just rewrite image03.txt")) parser.add_argument( "guid_mapping", help="Path to a text file with participant_id to GUID mapping. 
You will need to use the " From d3bb505651be7586196d7e6c37724c97eade74e8 Mon Sep 17 00:00:00 2001 From: leej3 Date: Tue, 24 Jul 2018 11:54:06 -0400 Subject: [PATCH 14/19] set default photometric_interp to monochrome2 --- bids2nda/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 3050bbc..fd75054 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -199,7 +199,7 @@ def run(args): dict_append(image03_dict, 'image_resolution2', nii.header.get_zooms()[1]) dict_append(image03_dict, 'image_resolution3', nii.header.get_zooms()[2]) dict_append(image03_dict, 'image_slice_thickness', nii.header.get_zooms()[2]) - dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation","")) + dict_append(image03_dict, 'photomet_interpret', metadata.get("global",{}).get("const",{}).get("PhotometricInterpretation","MONOCHROME2")) if len(nii.shape) > 3: image_extent4 = nii.shape[3] From 8270394d70126f1b697f7b53c62b8ccd8310bb0b Mon Sep 17 00:00:00 2001 From: shotgunosine Date: Thu, 4 Apr 2019 12:55:42 -0400 Subject: [PATCH 15/19] Add pulling age in months from session files and support for manifest --- bids2nda/main.py | 160 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 125 insertions(+), 35 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 772c7aa..25bc9fa 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -11,6 +11,8 @@ from glob import glob import os import sys +import hashlib +from pathlib import Path import nibabel as nb import json @@ -75,6 +77,24 @@ def dict_append(d, key, value): d[key] = [value, ] +def mani_dict_from_filepath(filepath): + mani = {} + mani["path"] = filepath + mani["name"] = os.path.basename(filepath) + pathStr=mani["path"].encode() + mani["md5sum"] = hashlib.md5(pathStr).hexdigest() + mani["size"] = os.path.getsize(pathStr) + return mani + + +def write_mani_files(imagefile, outputfile, files=None): + json_dict = {"files": [mani_dict_from_filepath(imagefile)]} + if files is not None: + for ff in files: + json_dict["files"].append(mani_dict_from_filepath(ff)) + Path(outputfile).write_text(json.dumps(json_dict, indent=2)) + + def run(args): guid_mapping = dict([line.split(" - ") for line in open(args.guid_mapping).read().split("\n") if line != '']) @@ -113,7 +133,11 @@ def run(args): "msec": "Milliseconds"} participants_df = pd.read_csv(os.path.join(args.bids_directory, "participants.tsv"), header=0, sep="\t") - participants_df['age'] = participants_df.age.astype(str).str.rstrip('Y').str.lstrip('0') + try: + participants_df['age'] = participants_df.age.astype(str).str.rstrip('Y').str.lstrip('0') + except AttributeError: + # If we can't find an age, it might be in the sessions.tsvs in which case we'll grab it later + pass image03_dict = OrderedDict() for file in glob(os.path.join(args.bids_directory, "sub-*", "*", "sub-*.nii.gz")) + \ @@ -129,6 +153,7 @@ def run(args): if "ses-" in file: ses = file.split("ses-")[-1].split("_")[0] scans_file = (os.path.join(args.bids_directory, "sub-" + sub, "ses-" + ses, "sub-" + sub + "_ses-" + ses + "_scans.tsv")) + sess_file = (os.path.join(args.bids_directory, "sub-" + sub,"sub-" + sub + "_sessions.tsv")) else: scans_file = (os.path.join(args.bids_directory, "sub-" + sub, "sub-" + sub + "_scans.tsv")) @@ -146,13 +171,22 @@ def run(args): ndar_date = sdate[1] + "/" + sdate[2].split("T")[0] + "/" + sdate[0] dict_append(image03_dict, 'interview_date', ndar_date) - interview_age = 
int(round(float(participants_df[participants_df.participant_id == "sub-" + sub].age.values[0]), 0)*12) + try: + interview_age = int(round(float(participants_df[participants_df.participant_id == "sub-" + sub].age.values[0]), 0)*12) + except AttributeError: + sess_df = pd.read_csv(sess_file, header=0, sep='\t') + if 'age_months' in sess_df.columns: + interview_age = sess_df.loc[sess_df.session_id == ("ses-" + ses),'age_months'].values[0] + else: + interview_age = int(round(float(sess_df.loc[sess_df.session_id == ("ses-" + ses), 'age'].values[0]), 0)*12) dict_append(image03_dict, 'interview_age', interview_age) sex = list(participants_df[participants_df.participant_id == "sub-" + sub].sex)[0] dict_append(image03_dict, 'gender', sex) - dict_append(image03_dict, 'image_file', file) + # image_file field not used if manifest is used + if not args.manifest: + dict_append(image03_dict, 'image_file', file) suffix = file.split("_")[-1].split(".")[0] if suffix == "bold": @@ -240,32 +274,83 @@ def run(args): dict_append(image03_dict, 'visit', visit) - if len(metadata) > 0 or suffix in ['bold', 'dwi']: + + if args.manifest: _, fname = os.path.split(file) - zip_name = fname.split(".")[0] + ".metadata.zip" - zip_path = os.path.join(args.output_directory, zip_name) - zip_path_exists = os.path.exists(zip_path) - if not zip_path_exists or (zip_path_exists and args.overwrite_zips): - with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: - - zipf.writestr(fname.replace(".nii.gz", ".json"), json.dumps(metadata, indent=4, sort_keys=True)) - if suffix == "bold": - #TODO write a more robust function for finding those files - events_file = file.split("_bold")[0] + "_events.tsv" - arch_name = os.path.split(events_file)[1] - if not os.path.exists(events_file): - task_name = file.split("_task-")[1].split("_")[0] - events_file = os.path.join(args.bids_directory, "task-" + task_name + "_events.tsv") - - if os.path.exists(events_file): - zipf.write(events_file, arch_name) - - dict_append(image03_dict, 'data_file2', zip_path) - dict_append(image03_dict, 'data_file2_type', "ZIP file with additional metadata from Brain Imaging " - "Data Structure (http://bids.neuroimaging.io)") + manifest_name = fname.split(".")[0] + "_manifest.json" + manifest_path = os.path.join(args.output_directory, 'manifest', manifest_name) + if not os.path.exists(os.path.join(args.output_directory, 'manifest')): + Path(os.path.join(args.output_directory, 'manifest')).mkdir(parents=True) + manifest_path_exists = os.path.exists(manifest_path) + manifest_files = None + if not manifest_path_exists or (manifest_path_exists and args.overwrite_zips): + # get the list of files for the manifest + manifest_files = [] + if len(metadata) > 0: + manifest_files.append(file.replace(".nii.gz", ".json")) + if suffix == "bold": + events_file = file.split("_bold")[0] + "_events.tsv" + arch_name = os.path.split(events_file)[1] + if not os.path.exists(events_file): + task_name = file.split("_task-")[1].split("_")[0] + events_file = os.path.join(args.bids_directory, "task-" + task_name + "_events.tsv") + if os.path.exists(events_file): + manifest_files.append(events_file) + if suffix == "dwi": + # TODO write a more robust function for finding those files + bvec_file = file.split("_dwi")[0] + "_dwi.bvec" + if not os.path.exists(bvec_file): + bvec_file = os.path.join(args.bids_directory, "dwi.bvec") + + if os.path.exists(bvec_file): + dict_append(image03_dict, 'bvecfile', bvec_file) + else: + dict_append(image03_dict, 'bvecfile', "") + + bval_file = 
file.split("_dwi")[0] + "_dwi.bval" + if not os.path.exists(bval_file): + bval_file = os.path.join(args.bids_directory, "dwi.bval") + + if os.path.exists(bval_file): + dict_append(image03_dict, 'bvalfile', bval_file) + else: + dict_append(image03_dict, 'bvalfile', "") + if os.path.exists(bval_file) or os.path.exists(bvec_file): + dict_append(image03_dict, 'bvek_bval_files', 'Yes') + else: + dict_append(image03_dict, 'bvek_bval_files', 'No') + + + write_mani_files(file, manifest_path, files=manifest_files) + dict_append(image03_dict, 'manifest', manifest_path) + else: - dict_append(image03_dict, 'data_file2', "") - dict_append(image03_dict, 'data_file2_type', "") + if len(metadata) > 0 or suffix in ['bold', 'dwi']: + _, fname = os.path.split(file) + zip_name = fname.split(".")[0] + ".metadata.zip" + zip_path = os.path.join(args.output_directory, zip_name) + zip_path_exists = os.path.exists(zip_path) + if not zip_path_exists or (zip_path_exists and args.overwrite_zips): + with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: + + zipf.writestr(fname.replace(".nii.gz", ".json"), json.dumps(metadata, indent=4, sort_keys=True)) + if suffix == "bold": + #TODO write a more robust function for finding those files + events_file = file.split("_bold")[0] + "_events.tsv" + arch_name = os.path.split(events_file)[1] + if not os.path.exists(events_file): + task_name = file.split("_task-")[1].split("_")[0] + events_file = os.path.join(args.bids_directory, "task-" + task_name + "_events.tsv") + + if os.path.exists(events_file): + zipf.write(events_file, arch_name) + + dict_append(image03_dict, 'data_file2', zip_path) + dict_append(image03_dict, 'data_file2_type', "ZIP file with additional metadata from Brain Imaging " + "Data Structure (http://bids.neuroimaging.io)") + else: + dict_append(image03_dict, 'data_file2', "") + dict_append(image03_dict, 'data_file2_type', "") if suffix == "dwi": # TODO write a more robust function for finding those files @@ -294,17 +379,20 @@ def run(args): dict_append(image03_dict, 'bvecfile', "") dict_append(image03_dict, 'bvalfile', "") dict_append(image03_dict, 'bvek_bval_files', "") + - # all values of image03_dict should be the same length. - # Fail when this is not true instead of when the dataframe - # is created. - assert(len(set(map(len,image03_dict.values()))) ==1) + # all values of image03_dict should be the same length. + # Fail when this is not true instead of when the dataframe + # is created. 
+ assert(len(set(map(len,image03_dict.values()))) ==1) image03_df = pd.DataFrame(image03_dict) - with open(os.path.join(args.output_directory, "image03.txt"), "w") as out_fp: - out_fp.write('"image"\t"3"\n') - image03_df.to_csv(out_fp, sep="\t", index=False, quoting=csv.QUOTE_ALL) + with open(os.path.join(args.output_directory, "image03.csv"), "w") as out_fp: + nfields = image03_df.shape[1] + head_string = 'image,3,' + (',' * (nfields - 3)) + '\n' + out_fp.write(head_string) + image03_df.to_csv(out_fp, sep=",", index=False, quoting=csv.QUOTE_ALL) def main(): class MyParser(argparse.ArgumentParser): @@ -326,7 +414,7 @@ def error(self, message): " found in the scan metadata this value is used")) parser.add_argument('-o', '--overwrite_zips', action='store_true', help = ("If a conversion has already been performed, the default is " - "to avoid rewriting each zip file generated and instead just rewrite image03.txt")) + "to avoid rewriting each zip or manifest file generated and instead just rewrite image03.txt")) parser.add_argument( "guid_mapping", help="Path to a text file with participant_id to GUID mapping. You will need to use the " @@ -336,6 +424,8 @@ def error(self, message): "output_directory", help="Directory where NDA files will be stored", metavar="OUTPUT_DIRECTORY") + parser.add_argument('-m', '--manifest', action='store_true', + help="Use manifest file pairs to track image and json pairs") args = parser.parse_args() run(args) From 5d4fe78f852bc28b23ebf3c020c42648430c4bb1 Mon Sep 17 00:00:00 2001 From: shotgunosine Date: Thu, 4 Apr 2019 13:52:02 -0400 Subject: [PATCH 16/19] add bval and bvec files to manifest --- bids2nda/main.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 25bc9fa..146dad4 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -303,22 +303,14 @@ def run(args): bvec_file = os.path.join(args.bids_directory, "dwi.bvec") if os.path.exists(bvec_file): - dict_append(image03_dict, 'bvecfile', bvec_file) - else: - dict_append(image03_dict, 'bvecfile', "") + manifest_files.append(bvec_file) bval_file = file.split("_dwi")[0] + "_dwi.bval" if not os.path.exists(bval_file): bval_file = os.path.join(args.bids_directory, "dwi.bval") if os.path.exists(bval_file): - dict_append(image03_dict, 'bvalfile', bval_file) - else: - dict_append(image03_dict, 'bvalfile', "") - if os.path.exists(bval_file) or os.path.exists(bvec_file): - dict_append(image03_dict, 'bvek_bval_files', 'Yes') - else: - dict_append(image03_dict, 'bvek_bval_files', 'No') + manifest_files.append(bvec_file) write_mani_files(file, manifest_path, files=manifest_files) From 3d9acda594b93d6cf753004e4fdfed613a76c1ed Mon Sep 17 00:00:00 2001 From: shotgunosine Date: Thu, 4 Apr 2019 13:57:47 -0400 Subject: [PATCH 17/19] FIX fill bval_bvec_file if manifest submission --- bids2nda/main.py | 47 +++++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 146dad4..d5d7c2c 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -311,6 +311,9 @@ def run(args): if os.path.exists(bval_file): manifest_files.append(bvec_file) + + if os.path.exists(bval_file) & os.path.exists(bvec_file): + dict_append(image03_dict, 'bvek_bval_files', 'Yes') write_mani_files(file, manifest_path, files=manifest_files) @@ -344,33 +347,33 @@ def run(args): dict_append(image03_dict, 'data_file2', "") dict_append(image03_dict, 'data_file2_type', "") - if suffix == "dwi": - # TODO write a 
more robust function for finding those files - bvec_file = file.split("_dwi")[0] + "_dwi.bvec" - if not os.path.exists(bvec_file): - bvec_file = os.path.join(args.bids_directory, "dwi.bvec") + if suffix == "dwi": + # TODO write a more robust function for finding those files + bvec_file = file.split("_dwi")[0] + "_dwi.bvec" + if not os.path.exists(bvec_file): + bvec_file = os.path.join(args.bids_directory, "dwi.bvec") - if os.path.exists(bvec_file): - dict_append(image03_dict, 'bvecfile', bvec_file) - else: - dict_append(image03_dict, 'bvecfile', "") + if os.path.exists(bvec_file): + dict_append(image03_dict, 'bvecfile', bvec_file) + else: + dict_append(image03_dict, 'bvecfile', "") - bval_file = file.split("_dwi")[0] + "_dwi.bval" - if not os.path.exists(bval_file): - bval_file = os.path.join(args.bids_directory, "dwi.bval") + bval_file = file.split("_dwi")[0] + "_dwi.bval" + if not os.path.exists(bval_file): + bval_file = os.path.join(args.bids_directory, "dwi.bval") - if os.path.exists(bval_file): - dict_append(image03_dict, 'bvalfile', bval_file) + if os.path.exists(bval_file): + dict_append(image03_dict, 'bvalfile', bval_file) + else: + dict_append(image03_dict, 'bvalfile', "") + if os.path.exists(bval_file) or os.path.exists(bvec_file): + dict_append(image03_dict, 'bvek_bval_files', 'Yes') + else: + dict_append(image03_dict, 'bvek_bval_files', 'No') else: + dict_append(image03_dict, 'bvecfile', "") dict_append(image03_dict, 'bvalfile', "") - if os.path.exists(bval_file) or os.path.exists(bvec_file): - dict_append(image03_dict, 'bvek_bval_files', 'Yes') - else: - dict_append(image03_dict, 'bvek_bval_files', 'No') - else: - dict_append(image03_dict, 'bvecfile', "") - dict_append(image03_dict, 'bvalfile', "") - dict_append(image03_dict, 'bvek_bval_files', "") + dict_append(image03_dict, 'bvek_bval_files', "") # all values of image03_dict should be the same length. 
From 76dfe24dedddb07fd57ab37563380f7d1fa98aca Mon Sep 17 00:00:00 2001 From: shotgunosine Date: Wed, 3 Apr 2019 14:51:44 -0400 Subject: [PATCH 18/19] ADD mappings from ASL extension scan types --- bids2nda/main.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bids2nda/main.py b/bids2nda/main.py index d5d7c2c..120a378 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -110,6 +110,9 @@ def run(args): "T2star": "MR: T2star", "FLAIR": "MR: FLAIR", "asl": "ASL", + "MZeroScan": "ASL", + "DeltaM": "ASL", + "CBF": "ASL", "FLASH": "MR structural (FLASH)", #PET; #microscopy; @@ -124,6 +127,7 @@ def run(args): "phasediff": "Field Map", "magnitude1": "Field Map", "magnitude2": "Field Map", + "magnitude": "Field Map", "fieldmap": "Field Map" #X - Ray } From 47e165d12f4ab54b0b80829287c065f5a740d697 Mon Sep 17 00:00:00 2001 From: shotgunosine Date: Mon, 8 Apr 2019 14:16:51 -0400 Subject: [PATCH 19/19] FIX update bvek_bval_files correctly --- bids2nda/main.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/bids2nda/main.py b/bids2nda/main.py index 120a378..6312a51 100644 --- a/bids2nda/main.py +++ b/bids2nda/main.py @@ -316,8 +316,12 @@ def run(args): if os.path.exists(bval_file): manifest_files.append(bvec_file) - if os.path.exists(bval_file) & os.path.exists(bvec_file): + if os.path.exists(bval_file) or os.path.exists(bvec_file): dict_append(image03_dict, 'bvek_bval_files', 'Yes') + else: + dict_append(image03_dict, 'bvek_bval_files', 'No') + else: + dict_append(image03_dict, 'bvek_bval_files', "") write_mani_files(file, manifest_path, files=manifest_files) @@ -378,7 +382,8 @@ def run(args): dict_append(image03_dict, 'bvecfile', "") dict_append(image03_dict, 'bvalfile', "") dict_append(image03_dict, 'bvek_bval_files', "") - + + # all values of image03_dict should be the same length. # Fail when this is not true instead of when the dataframe
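For orientation, the manifest support added in PATCH 15 writes one JSON file per image listing the image and its sidecars; note that, as the code is written, the md5sum field is computed over the path string rather than the file contents, while size is the on-disk file size. A rough usage sketch, assuming the patched bids2nda.main is importable, that the hypothetical paths below exist on disk, and that the output directory has already been created:

from bids2nda.main import write_mani_files

# Hypothetical BIDS paths; in the tool these come from the glob over sub-* directories.
image = "sub-01/func/sub-01_task-rest_bold.nii.gz"
sidecars = ["sub-01/func/sub-01_task-rest_bold.json",
            "sub-01/func/sub-01_task-rest_events.tsv"]

# Produces out/manifest/sub-01_task-rest_bold_manifest.json shaped like
# {"files": [{"path": ..., "name": ..., "md5sum": ..., "size": ...}, ...]}
write_mani_files(image, "out/manifest/sub-01_task-rest_bold_manifest.json", files=sidecars)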