From 942babf82628c880b9f126234872008e28fa0f6d Mon Sep 17 00:00:00 2001
From: Arthur Boschet
Date: Tue, 20 Feb 2024 16:42:36 -0500
Subject: [PATCH 1/4] Add a --folds argument to the run_inference.py script so
 that we can choose which folds to use. By default, all folds are used, as
 before.

---
 nnunet_scripts/run_inference.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/nnunet_scripts/run_inference.py b/nnunet_scripts/run_inference.py
index e54962f..803ed6a 100644
--- a/nnunet_scripts/run_inference.py
+++ b/nnunet_scripts/run_inference.py
@@ -4,13 +4,13 @@ Author: Naga Karthik
 
 """
 
-import os
 import argparse
-import torch
-from pathlib import Path
-from batchgenerators.utilities.file_and_folder_operations import join
+import os
 import time
+from pathlib import Path
 
+import torch
+from batchgenerators.utilities.file_and_folder_operations import join
 from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor
 
 if 'nnUNet_raw' not in os.environ:
@@ -38,6 +38,8 @@ def get_parser() -> argparse.ArgumentParser:
     parser.add_argument('--path-model', required=True,
                         help='Path to the model directory. This folder should contain individual folders '
                              'like fold_0, fold_1, etc.',)
+    parser.add_argument('--folds', nargs='+', type=int, default=None,
+                        help='List of folds to use for inference. If not specified, all available folds. Default: None')
     parser.add_argument('--use-gpu', action='store_true', default=False,
                         help='Use GPU for inference. Default: False')
     parser.add_argument('--use-mirroring', action='store_true', default=False,
@@ -136,7 +138,10 @@ def main():
         path_pred = os.path.join(args.path_out, add_suffix(fname, '_pred'))
         path_out.append(path_pred)
 
-    folds_avail = [int(f.split('_')[-1]) for f in os.listdir(args.path_model) if f.startswith('fold_')]
+    if args.folds is not None:
+        folds_avail = args.folds
+    else:
+        folds_avail = [int(f.split('_')[-1]) for f in os.listdir(args.path_model) if f.startswith('fold_')]
 
     print('Starting inference...')
     start = time.time()

From ac8030891948a2a20c5bcde1b8baeae945eefdc7 Mon Sep 17 00:00:00 2001
From: Arthur Boschet
Date: Tue, 20 Feb 2024 16:46:59 -0500
Subject: [PATCH 2/4] Allow run_evaluation.py to create the directory where
 the output .csv file is located

---
 nnunet_scripts/run_evaluation.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/nnunet_scripts/run_evaluation.py b/nnunet_scripts/run_evaluation.py
index 61df3a2..7ab98fa 100644
--- a/nnunet_scripts/run_evaluation.py
+++ b/nnunet_scripts/run_evaluation.py
@@ -7,14 +7,17 @@
 
 import argparse
 import json
+import os
 import warnings
+from pathlib import Path
+
 import cv2
 import numpy as np
-import torch
 import pandas as pd
-from pathlib import Path
+import torch
 from monai.metrics import DiceMetric, MeanIoU
 
+
 def compute_metrics(pred, gt, metric):
     """
     Compute the given metric for a single image
@@ -109,6 +112,8 @@ def main():
 
     # Export the DataFrame to a CSV file
     output_fname = args.output_fname + '.csv'
+    output_dir = Path(output_fname).parent
+    os.makedirs(output_dir, exist_ok=True)
     df.to_csv(output_fname, index=False)
     print(f'Evaluation results saved to {output_fname}.')
 

From ebe4c9de627503b565c18229ea39eb53c8ea10ce Mon Sep 17 00:00:00 2001
From: Arthur Boschet
Date: Tue, 20 Feb 2024 16:56:33 -0500
Subject: [PATCH 3/4] Improve the inference_and_evaluation.sh script to run
 the evaluation on the ensemble of folds as well as on each individual fold

---
 nnunet_scripts/inference_and_evaluation.sh | 27 +++++++++++++++-------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/nnunet_scripts/inference_and_evaluation.sh b/nnunet_scripts/inference_and_evaluation.sh
index 7c7ac79..b3a1de3 100755
--- a/nnunet_scripts/inference_and_evaluation.sh
+++ b/nnunet_scripts/inference_and_evaluation.sh
@@ -4,26 +4,37 @@
 NNUNET_PATH=$1
 
 DATASET_NAME="Dataset444_AGG"
-if [ ! -d "${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/scores" ]; then
-    echo "Making directory ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/scores"
-    mkdir "${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/scores"
-fi
-
 shift
 
 for dataset in "$@"; do
     echo -e "\n\nRun inference with $NNUNET_PATH/$dataset model on $DATASET_NAME dataset\n\n"
     dataset_name=${dataset#Dataset???_}
 
     python nnunet_scripts/run_inference.py --path-dataset ${NNUNET_PATH}/nnUNet_raw/${DATASET_NAME}/imagesTs \
-        --path-out ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/${dataset_name}_model_best_checkpoints_inference \
+        --path-out ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/ensemble_models/${dataset_name}_model_best_checkpoints_inference \
         --path-model ~/data/nnunet_all/nnUNet_results/${dataset}/nnUNetTrainer__nnUNetPlans__2d/ \
         --use-gpu \
         --use-best-checkpoint
 
-    python nnunet_scripts/run_evaluation.py --pred_path ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/${dataset_name}_model_best_checkpoints_inference \
+    python nnunet_scripts/run_evaluation.py --pred_path ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/ensemble_models/${dataset_name}_model_best_checkpoints_inference \
         --mapping_path ${NNUNET_PATH}/nnUNet_raw/${DATASET_NAME}/fname_mapping.json \
         --gt_path ${NNUNET_PATH}/test_labels \
-        --output_fname ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/scores/${dataset_name}_model_best_checkpoints_scores \
+        --output_fname ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/ensemble_models/scores/${dataset_name}_model_best_checkpoints_scores \
         --pred_suffix _0000
+    # Run inference and evaluation for each fold
+    for fold in {0..4}; do
+        echo -e "\nProcessing fold $fold\n"
+        python nnunet_scripts/run_inference.py --path-dataset ${NNUNET_PATH}/nnUNet_raw/${DATASET_NAME}/imagesTs \
+            --path-out ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/fold_${fold}/${dataset_name}_model_best_checkpoints_inference \
+            --path-model ~/data/nnunet_all/nnUNet_results/${dataset}/nnUNetTrainer__nnUNetPlans__2d/ \
+            --folds $fold \
+            --use-gpu \
+            --use-best-checkpoint
+
+        python nnunet_scripts/run_evaluation.py --pred_path ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/fold_${fold}/${dataset_name}_model_best_checkpoints_inference \
+            --mapping_path ${NNUNET_PATH}/nnUNet_raw/${DATASET_NAME}/fname_mapping.json \
+            --gt_path ${NNUNET_PATH}/test_labels \
+            --output_fname ${NNUNET_PATH}/nnUNet_results/${DATASET_NAME}/fold_${fold}/scores/${dataset_name}_model_best_checkpoints_scores \
+            --pred_suffix _0000
+    done
 done
\ No newline at end of file

From 94abee5c2fc13fc0dd5eaf3b74896873c8981194 Mon Sep 17 00:00:00 2001
From: Arthur Boschet
Date: Tue, 20 Feb 2024 17:03:13 -0500
Subject: [PATCH 4/4] Remove the unused --use-mirroring argument; use_mirroring
 was always set to True in the code regardless of the flag.

---
 nnunet_scripts/run_inference.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/nnunet_scripts/run_inference.py b/nnunet_scripts/run_inference.py
index 803ed6a..727add6 100644
--- a/nnunet_scripts/run_inference.py
+++ b/nnunet_scripts/run_inference.py
@@ -42,9 +42,6 @@ def get_parser() -> argparse.ArgumentParser:
                         help='List of folds to use for inference. If not specified, all available folds. Default: None')
     parser.add_argument('--use-gpu', action='store_true', default=False,
                         help='Use GPU for inference. Default: False')
-    parser.add_argument('--use-mirroring', action='store_true', default=False,
-                        help='Use mirroring (test-time) augmentation for prediction. '
-                             'NOTE: Inference takes a long time when this is enabled. Default: False')
     parser.add_argument('--use-best-checkpoint', action='store_true', default=False,
                         help='Use the best checkpoint (instead of the final checkpoint) for prediction. '
                              'NOTE: nnUNet by default uses the final checkpoint. Default: False')
@@ -149,7 +146,7 @@ def main():
         tile_step_size=0.5,
         use_gaussian=True,
         use_mirroring=True,
-        perform_everything_on_gpu=True if args.use_gpu else False,
+        perform_everything_on_gpu=args.use_gpu,
         device=torch.device('cuda') if args.use_gpu else torch.device('cpu'),
         verbose=False,
         verbose_preprocessing=False,
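
Usage note (not part of the patch series): with the --folds option from patch 1 and the automatic output-directory creation from patch 2, a single fold can be run and scored on its own, mirroring what the per-fold loop in inference_and_evaluation.sh does. This is a hedged sketch: the ${NNUNET_PATH}, ${dataset} and ${dataset_name} variables are assumed to be set as in that script, and fold 2 is an arbitrary choice; since --folds uses nargs='+', several folds can also be passed (e.g. --folds 0 2).

    # Inference restricted to fold 2 of the trained model
    python nnunet_scripts/run_inference.py --path-dataset ${NNUNET_PATH}/nnUNet_raw/Dataset444_AGG/imagesTs \
        --path-out ${NNUNET_PATH}/nnUNet_results/Dataset444_AGG/fold_2/${dataset_name}_model_best_checkpoints_inference \
        --path-model ~/data/nnunet_all/nnUNet_results/${dataset}/nnUNetTrainer__nnUNetPlans__2d/ \
        --folds 2 \
        --use-gpu \
        --use-best-checkpoint

    # Score the fold-2 predictions; run_evaluation.py now creates the scores/ directory if needed
    python nnunet_scripts/run_evaluation.py --pred_path ${NNUNET_PATH}/nnUNet_results/Dataset444_AGG/fold_2/${dataset_name}_model_best_checkpoints_inference \
        --mapping_path ${NNUNET_PATH}/nnUNet_raw/Dataset444_AGG/fname_mapping.json \
        --gt_path ${NNUNET_PATH}/test_labels \
        --output_fname ${NNUNET_PATH}/nnUNet_results/Dataset444_AGG/fold_2/scores/${dataset_name}_model_best_checkpoints_scores \
        --pred_suffix _0000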