From 4337c5522a9cda1c55607cbcebfdbebbfee55d82 Mon Sep 17 00:00:00 2001
From: Maximilian Rokuss
Date: Tue, 29 Oct 2024 12:09:29 +0100
Subject: [PATCH] Update trainer class and readme

---
 nnunetv2/training/nnUNetTrainer/autoPET3_Trainer.py | 1 +
 readme.md                                           | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/nnunetv2/training/nnUNetTrainer/autoPET3_Trainer.py b/nnunetv2/training/nnUNetTrainer/autoPET3_Trainer.py
index bf49f1e4..fd0a67dd 100644
--- a/nnunetv2/training/nnUNetTrainer/autoPET3_Trainer.py
+++ b/nnunetv2/training/nnUNetTrainer/autoPET3_Trainer.py
@@ -77,6 +77,7 @@ class autoPET3_Trainer(nnUNetTrainer):
     def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
                  device: torch.device = torch.device('cuda')):
         super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
+        self.num_epochs = 1500
         self.initial_lr = 1e-3
 
     @staticmethod
diff --git a/readme.md b/readme.md
index 0def532a..7ed13985 100644
--- a/readme.md
+++ b/readme.md
@@ -24,7 +24,7 @@ Our model builds on [nnU-Net](https://github.com/MIC-DKFZ/nnUNet) with a [ResEnc
 - The model is trained using [misalignment data augmentation](https://github.com/MIC-DKFZ/misalignment_DA) as well as omitting the smoothing term in the dice loss calcuation.
 - We use a dual-headed architecture for organ and lesion segmentation which improves performance as well as speeds up convergence, especially in cases without lesions.
 
-**You can [download the final checkpoint here](https://zenodo.org/records/13786235)!**
+**You can [download the final checkpoint here](https://zenodo.org/records/13786235)!** We updated the trainer class on Oct 29, 2024. Please download again in case you had troubles with the old checkpoint.
 
 ## Getting started
 
@@ -76,7 +76,7 @@ Now you are good to go to start a training. Use the dataset with DATASET_ID_LESI
 
 ### Training
 
-Training the model can be simply achieved by [downloading the pretrained checkpoint](https://zenodo.org/records/13753413) (Dataset619_nativemultistem) and running:
+Training the model can be simply achieved by [downloading the pretrained (not the final) checkpoint](https://zenodo.org/records/13753413) (Dataset619_nativemultistem) and running:
 
 ```bash
 nnUNetv2_train DATASET_ID_LESIONS 3d_fullres 0 -tr autoPET3_Trainer -p nnUNetResEncUNetLPlansMultiTalent -pretrained_weights /path/to/pretrained/weights/fold_all/checkpoint_final.pth
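
For context, a minimal sketch of how the patched `__init__` reads after this change. The base-class import path is assumed from the standard nnU-Net v2 package layout, and the rest of the class (its static methods and anything below the shown hunk) is omitted:

```python
import torch

# Assumed import path for the base trainer in nnU-Net v2; adjust if the package layout differs.
from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer


class autoPET3_Trainer(nnUNetTrainer):
    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
                 device: torch.device = torch.device('cuda')):
        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
        # Added by this patch: train for 1500 epochs, overriding the base trainer's default.
        self.num_epochs = 1500
        # Already present before the patch: lower the initial learning rate to 1e-3.
        self.initial_lr = 1e-3
```

Since only `__init__` changes, the `nnUNetv2_train ... -tr autoPET3_Trainer` command shown in the readme hunk above stays the same; the updated trainer simply runs with the new epoch count.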