From 9459c3b727df3c00e15502a9ea5c885aa1c760dd Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 27 Jun 2023 16:28:50 +0200 Subject: [PATCH 01/24] add initial nocuda Dockerfile and working TiffImporter and run.py script --- models/tiger_lb2/__init__.py | 1 + models/tiger_lb2/config/config.yml | 13 ++ .../tiger_lb2/dockerfiles/nocuda/Dockerfile | 58 +++++++++ models/tiger_lb2/scripts/run.py | 47 +++++++ models/tiger_lb2/utils/PanImgConverters.py | 122 ++++++++++++++++++ models/tiger_lb2/utils/TiffImporter.py | 50 +++++++ models/tiger_lb2/utils/TigerLB2Runner.py | 38 ++++++ models/tiger_lb2/utils/__init__.py | 1 + 8 files changed, 330 insertions(+) create mode 100644 models/tiger_lb2/__init__.py create mode 100644 models/tiger_lb2/config/config.yml create mode 100644 models/tiger_lb2/dockerfiles/nocuda/Dockerfile create mode 100644 models/tiger_lb2/scripts/run.py create mode 100644 models/tiger_lb2/utils/PanImgConverters.py create mode 100644 models/tiger_lb2/utils/TiffImporter.py create mode 100644 models/tiger_lb2/utils/TigerLB2Runner.py create mode 100644 models/tiger_lb2/utils/__init__.py diff --git a/models/tiger_lb2/__init__.py b/models/tiger_lb2/__init__.py new file mode 100644 index 00000000..90f60fdd --- /dev/null +++ b/models/tiger_lb2/__init__.py @@ -0,0 +1 @@ +from .utils import * \ No newline at end of file diff --git a/models/tiger_lb2/config/config.yml b/models/tiger_lb2/config/config.yml new file mode 100644 index 00000000..90396b3a --- /dev/null +++ b/models/tiger_lb2/config/config.yml @@ -0,0 +1,13 @@ +general: + data_base_dir: /app/data + +modules: + DicomImporter: + source_dir: input_data + import_dir: sorted_data + sort_data: True + meta: + mod: ct + + TiffImporter: + source_dir: input_data diff --git a/models/tiger_lb2/dockerfiles/nocuda/Dockerfile b/models/tiger_lb2/dockerfiles/nocuda/Dockerfile new file mode 100644 index 00000000..c629c906 --- /dev/null +++ b/models/tiger_lb2/dockerfiles/nocuda/Dockerfile @@ -0,0 +1,58 @@ +# Specify the base image for the environment +FROM mhubai/base:nocuda + +# Specify/override authors label +LABEL authors="sil.vandeleemput@radboudumc.nl" + +# Install panimg to test conversion integration TODO should later be installed with MHub/mhubio +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3-openslide \ + && rm -rf /var/lib/apt/lists/* +RUN pip3 install --no-cache-dir panimg + +# install required dependencies for algorithm (CPU-only) +RUN pip3 install --no-cache-dir torch==1.10.0+cpu torchvision==0.11.0+cpu -f https://download.pytorch.org/whl/torch_stable.html + +# Install ASAP +RUN : \ + && apt-get update \ + && apt-get -y install curl libpython3.8-dev \ + && curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ + && dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true \ + && apt-get -f install --fix-missing --fix-broken --assume-yes \ + && ldconfig -v \ + && apt-get clean \ + && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ + && rm ASAP-2.1-py38-Ubuntu2004.deb \ + && : + +# Install tiger LB2 algorithm +# - We use a shallow git clone for reduced bandwidth usage +# - Subsequently we remove the .git directory to procuce a compacter docker layer +RUN git clone --depth 1 https://github.com/vuno/tiger_challenge.git /vuno \ + && rm -rf /vuno/.git + +# Modify model loading code to load model weights to the cpu instead of the gpu +RUN sed -i 's/cuda:0/cpu/g' /vuno/detection/detection_inference.py \ + 
&& sed -i 's/torch.load(str(ckpt_fname))/torch.load(str(ckpt_fname), map_location="cpu")/g' /vuno/segmentation/inference.py + +# Install tiger LB2 dependencies +RUN pip3 install --no-cache-dir -r /vuno/requirements.txt + +# Reinstall correct version of Numpy to function with ASAP 2.1 +RUN pip install --no-cache-dir --force-reinstall numpy==1.22 + + +# Clone MHub model (m-tiger-lb2 branch, fixed to commit TODO) +#RUN git init \ +# && git sparse-checkout set "models/tiger_lb2" \ +# && git fetch https://github.com/MHubAI/models.git m-tiger-lb2 \ +# && git merge TODO + +# Add algorithm code base and model code bases to python path +# NOTE yolov5 and /app both have a models directory in their folders which causes import conflicts +# to solve this we add /app/models instead of /app to the python path +ENV PYTHONPATH="/vuno:/app/models:/usr/local/lib/python3.8/dist-packages/yolov5/" + +# Default run script +CMD ["python3", "/app/models/tiger_lb2/scripts/run.py"] diff --git a/models/tiger_lb2/scripts/run.py b/models/tiger_lb2/scripts/run.py new file mode 100644 index 00000000..956631ee --- /dev/null +++ b/models/tiger_lb2/scripts/run.py @@ -0,0 +1,47 @@ +""" +------------------------------------------------------- +MHub / DIAG - Run HookNet Lung Segmentation Model +------------------------------------------------------- + +------------------------------------------------------- +Author: Sil van de Leemput +Email: sil.vandeleemput@radboudumc.nl +------------------------------------------------------- +""" + +import sys +sys.path.append('.') + +from mhubio.core import Config, DataType, FileType +from mhubio.modules.organizer.DataOrganizer import DataOrganizer +from tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner +from tiger_lb2.utils.PanImgConverters import TiffPanImgConverter +from tiger_lb2.utils.TiffImporter import TiffImporter + +# clean-up +import shutil +shutil.rmtree("/app/data/sorted_data", ignore_errors=True) +shutil.rmtree("/app/tmp", ignore_errors=True) +shutil.rmtree("/app/data/output_data", ignore_errors=True) + +# config +config = Config('/app/models/tiger_lb2/config/config.yml') + +# TODO could be WSI Dicom input alternatively + +# import (wsi:dicom) +# DicomImporter(config).execute() + +# convert (wsi:dicom -> wsi:tiff) +# TiffPanImgConverter(config).execute() + +# import (wsi:tiff) +TiffImporter(config).execute() + +# execute model (wsi:tiff -> json) +TigerLB2Runner(config).execute() + +# organize data into output folder +organizer = DataOrganizer(config, set_file_permissions=sys.platform.startswith('linux')) +organizer.setTarget(DataType(FileType.JSON), "/app/data/output_data/[i:sid]/tiger_lb2_tils_score.json") +organizer.execute() diff --git a/models/tiger_lb2/utils/PanImgConverters.py b/models/tiger_lb2/utils/PanImgConverters.py new file mode 100644 index 00000000..25dd618e --- /dev/null +++ b/models/tiger_lb2/utils/PanImgConverters.py @@ -0,0 +1,122 @@ +""" +------------------------------------------------------------- +MHub - PanImg Conversion Modules Dicom2Mha and WSI-Dicom2Tiff +------------------------------------------------------------- + +------------------------------------------------------------- +Author: Sil van de Leemput +Email: sil.vandeleemput@radboudumc.nl +------------------------------------------------------------- +""" + + +from typing import Optional + +from mhubio.modules.convert.DataConverter import DataConverter +from mhubio.core import Instance, InstanceData, DataType, FileType + +import os +from pathlib import Path +import shutil + +from 
panimg.exceptions import UnconsumedFilesException +from panimg.image_builders.dicom import image_builder_dicom +from panimg.image_builders.tiff import image_builder_tiff +from panimg.image_builders.metaio_nrrd import image_builder_nrrd + +import SimpleITK + + +class MhaPanImgConverter(DataConverter): + """ + Conversion module. + Convert instance data from dicom or nrrd to mha. + """ + + def convert(self, instance: Instance) -> Optional[InstanceData]: + + # create a converted instance + has_instance_dicom = instance.hasType(DataType(FileType.DICOM)) + has_instance_nrrd = instance.hasType(DataType(FileType.NRRD)) + + assert has_instance_dicom or has_instance_nrrd, f"CONVERT ERROR: required datatype (dicom or nrrd) not available in instance {str(instance)}." + + # select input data, dicom has priority over nrrd + input_data = instance.data.filter(DataType(FileType.DICOM) if has_instance_dicom else DataType(FileType.NRRD)).first() + + # out data + mha_data = InstanceData("image.mha", DataType(FileType.MHA, input_data.type.meta)) + mha_data.instance = instance + + # paths + inp_data_dir = Path(input_data.abspath) + out_mha_file = Path(mha_data.abspath) + + # sanity check + assert(inp_data_dir.is_dir()) + + # DICOM CT to MHA conversion (if the file doesn't exist yet) + if out_mha_file.is_file(): + print("CONVERT ERROR: File already exists: ", out_mha_file) + return None + else: + # run conversion using panimg + input_files = {f for f in inp_data_dir.glob(["*.nrrd", "*.dcm"][has_instance_dicom]) if f.is_file()} + img_builder = image_builder_dicom if has_instance_dicom else image_builder_nrrd + try: + for result in img_builder(files=input_files): + sitk_image = result.image # SimpleITK image + SimpleITK.WriteImage(sitk_image, str(out_mha_file)) + except UnconsumedFilesException as e: + # e.errors is keyed with a Path to a file that could not be consumed, + # with a list of all the errors found with loading it, + # the user can then choose what to do with that information + print("CONVERT ERROR: UnconsumedFilesException during PanImg conversion: ", e.errors) + return None + + return mha_data + + +class TiffPanImgConverter(DataConverter): + """ + Conversion module. + Convert instance data from WSI-dicom to tiff. + """ + + def convert(self, instance: Instance) -> Optional[InstanceData]: + + # create a converted instance + assert instance.hasType(DataType(FileType.DICOM)), f"CONVERT ERROR: required datatype (dicom) not available in instance {str(instance)}." 
+ dicom_data = instance.data.filter(DataType(FileType.DICOM)).first() + + # out data + tiff_data = InstanceData("image.tiff", DataType(FileType.TIFF, dicom_data.type.meta)) + tiff_data.instance = instance + + # paths + inp_dicom_dir = Path(dicom_data.abspath) + out_tiff_file = Path(tiff_data.abspath) + + # sanity check + assert(inp_dicom_dir.is_dir()) + + # WSI-DICOM to TIFF conversion (if the file doesn't exist yet) + if out_tiff_file.is_file(): + print("CONVERT ERROR: File already exists: ", out_tiff_file) + return None + else: + # run conversion using panimg + dcm_input_files = {f for f in inp_dicom_dir.glob("*.dcm") if f.is_file()} + + try: + for result in image_builder_tiff(files=dcm_input_files): + tiff_image = result.file # Path to the tiff file + shutil.move(str(tiff_image), str(out_tiff_file)) + except UnconsumedFilesException as e: + # e.errors is keyed with a Path to a file that could not be consumed, + # with a list of all the errors found with loading it, + # the user can then choose what to do with that information + print("CONVERT ERROR: UnconsumedFilesException during PanImg conversion: ", e.errors) + return None + + return tiff_data diff --git a/models/tiger_lb2/utils/TiffImporter.py b/models/tiger_lb2/utils/TiffImporter.py new file mode 100644 index 00000000..86bd38d5 --- /dev/null +++ b/models/tiger_lb2/utils/TiffImporter.py @@ -0,0 +1,50 @@ +""" +-------------------------------------- +MHub / DIAG - Tiff importer +-------------------------------------- + +-------------------------------------- +Author: Sil van de Leemput +Email: sil.vandeleemput@radboudumc.nl +-------------------------------------- +""" +import os +from typing import Optional +from pathlib import Path + +from mhubio.modules.importer.DataImporter import IDEF, DataImporter, FileType +from mhubio.core import Meta, DirectoryChain + + +# TODO should be moved to mhubio/core/templates.py +WSI = Meta(mod="wsi") + + +class TiffImporter(DataImporter): + def task(self) -> None: + source_dir = self.c['source_dir'] + source_dc = DirectoryChain(path=source_dir, parent=self.config.data.dc) + # input tiff file directory + input_dir = source_dc.abspath + self.v(f"{input_dir}") + + # add input tiff files as WSI images... + self.setBasePath(input_dir) + for input_tiff_file in Path(input_dir).glob("*.tif"): + self.v(f"{input_tiff_file}") + self.addTiffWSI(str(input_tiff_file), ref=None) + + # let the base module take over from here + super().task() + + def addTiffWSI(self, path: str, ref: Optional[str] = None) -> None: + _path = self._resolvePath(path, ref) + self.v("adding wsi in tiff format with resolved path: ", _path) + assert os.path.isfile(_path) and _path.endswith('.tif'), f"Expect existing tiff file, '{_path}' was given instead." 
+ self._import_paths.append(IDEF( + ref = ref, + path = path, + ftype = FileType.TIFF, + meta = WSI + )) + diff --git a/models/tiger_lb2/utils/TigerLB2Runner.py b/models/tiger_lb2/utils/TigerLB2Runner.py new file mode 100644 index 00000000..884942c7 --- /dev/null +++ b/models/tiger_lb2/utils/TigerLB2Runner.py @@ -0,0 +1,38 @@ +""" +------------------------------------------------ +Mhub / DIAG - Run Module for Tiger LB2 Algorithm +------------------------------------------------ + +------------------------------------------------ +Author: Sil van de Leemput +Email: sil.vandeleemput@radboudumc.nl +------------------------------------------------ +""" +from mhubio.core import Instance, InstanceData, IO, Module + +from pathlib import Path +import numpy as np +import SimpleITK as sitk + +import pipeline.tils_pipeline as tils_pipeline +import algorithm.rw as rw + + +class TigerLB2Runner(Module): + + @IO.Instance() + @IO.Input('in_data', 'tiff', the='input whole slide image Tiff') + @IO.Output('out_data', 'tiger_lb2_tils_score.json', 'json:model=TigerLB2TilsScore', 'in_data', the='TIGER LB2 Tils score') + def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData) -> None: + wsi_filepath = Path(in_data.abspath) + wsi_mri = rw.open_multiresolutionimage_image(wsi_filepath) + + print(f"Input WSI: {wsi_filepath}") + + tils_score_writer = rw.TilsScoreWriter(Path(out_data.abspath)) + tils_score = tils_pipeline.run_tils_pipeline(wsi_mri) + + # write tils score + self.v(f"Writing tils score to {out_data.abspath}") + tils_score_writer.set_tils_score(tils_score=tils_score) + tils_score_writer.save() diff --git a/models/tiger_lb2/utils/__init__.py b/models/tiger_lb2/utils/__init__.py new file mode 100644 index 00000000..722a52e3 --- /dev/null +++ b/models/tiger_lb2/utils/__init__.py @@ -0,0 +1 @@ +from .TigerLB2Runner import * \ No newline at end of file From edcc2e4bc43648d0eb56fd870bdf001a3eadbf57 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 27 Jun 2023 16:48:58 +0200 Subject: [PATCH 02/24] add cuda 11.4/12.0 Dockerfiles and replaced print statement in runner --- .../tiger_lb2/dockerfiles/cuda11.4/Dockerfile | 53 +++++++++++++++++++ .../tiger_lb2/dockerfiles/cuda12.0/Dockerfile | 53 +++++++++++++++++++ .../tiger_lb2/dockerfiles/nocuda/Dockerfile | 3 +- models/tiger_lb2/utils/TigerLB2Runner.py | 2 +- 4 files changed, 108 insertions(+), 3 deletions(-) create mode 100644 models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile create mode 100644 models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile diff --git a/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile b/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile new file mode 100644 index 00000000..eb0ea99f --- /dev/null +++ b/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile @@ -0,0 +1,53 @@ +# Specify the base image for the environment +FROM mhubai/base:cuda11.4 + +# Specify/override authors label +LABEL authors="sil.vandeleemput@radboudumc.nl" + +# Install panimg to test conversion integration TODO should later be installed with MHub/mhubio +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3-openslide \ + && rm -rf /var/lib/apt/lists/* +RUN pip3 install --no-cache-dir panimg + +# install required dependencies for algorithm +RUN pip3 install --no-cache-dir torch==1.10.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html + +# Install ASAP +RUN : \ + && apt-get update \ + && apt-get -y install curl libpython3.8-dev \ + && curl --remote-name --location 
"https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ + && dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true \ + && apt-get -f install --fix-missing --fix-broken --assume-yes \ + && ldconfig -v \ + && apt-get clean \ + && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ + && rm ASAP-2.1-py38-Ubuntu2004.deb \ + && : + +# Install tiger LB2 algorithm +# - We use a shallow git clone for reduced bandwidth usage +# - Subsequently we remove the .git directory to procuce a compacter docker layer +RUN git clone --depth 1 https://github.com/vuno/tiger_challenge.git /vuno \ + && rm -rf /vuno/.git + +# Install tiger LB2 dependencies +RUN pip3 install --no-cache-dir -r /vuno/requirements.txt + +# Reinstall correct version of Numpy to function with ASAP 2.1 +RUN pip install --no-cache-dir --force-reinstall numpy==1.22 + +# Clone MHub model (m-tiger-lb2 branch, fixed to commit TODO) +#RUN git init \ +# && git sparse-checkout set "models/tiger_lb2" \ +# && git fetch https://github.com/MHubAI/models.git m-tiger-lb2 \ +# && git merge TODO + +# Add algorithm code base and model code bases to python path +# NOTE yolov5 and /app both have a models directory in their folders which causes import conflicts +# to solve this we add /app/models instead of /app to the python path +ENV PYTHONPATH="/vuno:/app/models:/usr/local/lib/python3.8/dist-packages/yolov5/" + +# Default run script +CMD ["python3", "/app/models/tiger_lb2/scripts/run.py"] diff --git a/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile b/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile new file mode 100644 index 00000000..a0eec858 --- /dev/null +++ b/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile @@ -0,0 +1,53 @@ +# Specify the base image for the environment +FROM mhubai/base:cuda12.0 + +# Specify/override authors label +LABEL authors="sil.vandeleemput@radboudumc.nl" + +# Install panimg to test conversion integration TODO should later be installed with MHub/mhubio +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3-openslide \ + && rm -rf /var/lib/apt/lists/* +RUN pip3 install --no-cache-dir panimg + +# install required dependencies for algorithm +RUN pip3 install --no-cache-dir torch==2.0.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html + +# Install ASAP +RUN : \ + && apt-get update \ + && apt-get -y install curl libpython3.8-dev \ + && curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ + && dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true \ + && apt-get -f install --fix-missing --fix-broken --assume-yes \ + && ldconfig -v \ + && apt-get clean \ + && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ + && rm ASAP-2.1-py38-Ubuntu2004.deb \ + && : + +# Install tiger LB2 algorithm +# - We use a shallow git clone for reduced bandwidth usage +# - Subsequently we remove the .git directory to procuce a compacter docker layer +RUN git clone --depth 1 https://github.com/vuno/tiger_challenge.git /vuno \ + && rm -rf /vuno/.git + +# Install tiger LB2 dependencies +RUN pip3 install --no-cache-dir -r /vuno/requirements.txt + +# Reinstall correct version of Numpy to function with ASAP 2.1 +RUN pip install --no-cache-dir --force-reinstall numpy==1.22 + +# Clone MHub model (m-tiger-lb2 branch, fixed to commit TODO) +#RUN git init \ +# && git sparse-checkout set "models/tiger_lb2" \ +# && git fetch 
https://github.com/MHubAI/models.git m-tiger-lb2 \ +# && git merge TODO + +# Add algorithm code base and model code bases to python path +# NOTE yolov5 and /app both have a models directory in their folders which causes import conflicts +# to solve this we add /app/models instead of /app to the python path +ENV PYTHONPATH="/vuno:/app/models:/usr/local/lib/python3.8/dist-packages/yolov5/" + +# Default run script +CMD ["python3", "/app/models/tiger_lb2/scripts/run.py"] diff --git a/models/tiger_lb2/dockerfiles/nocuda/Dockerfile b/models/tiger_lb2/dockerfiles/nocuda/Dockerfile index c629c906..b2d30b39 100644 --- a/models/tiger_lb2/dockerfiles/nocuda/Dockerfile +++ b/models/tiger_lb2/dockerfiles/nocuda/Dockerfile @@ -11,7 +11,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ RUN pip3 install --no-cache-dir panimg # install required dependencies for algorithm (CPU-only) -RUN pip3 install --no-cache-dir torch==1.10.0+cpu torchvision==0.11.0+cpu -f https://download.pytorch.org/whl/torch_stable.html +RUN pip3 install --no-cache-dir torch==1.10.0+cpu -f https://download.pytorch.org/whl/torch_stable.html # Install ASAP RUN : \ @@ -42,7 +42,6 @@ RUN pip3 install --no-cache-dir -r /vuno/requirements.txt # Reinstall correct version of Numpy to function with ASAP 2.1 RUN pip install --no-cache-dir --force-reinstall numpy==1.22 - # Clone MHub model (m-tiger-lb2 branch, fixed to commit TODO) #RUN git init \ # && git sparse-checkout set "models/tiger_lb2" \ diff --git a/models/tiger_lb2/utils/TigerLB2Runner.py b/models/tiger_lb2/utils/TigerLB2Runner.py index 884942c7..c3360e04 100644 --- a/models/tiger_lb2/utils/TigerLB2Runner.py +++ b/models/tiger_lb2/utils/TigerLB2Runner.py @@ -27,7 +27,7 @@ def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData wsi_filepath = Path(in_data.abspath) wsi_mri = rw.open_multiresolutionimage_image(wsi_filepath) - print(f"Input WSI: {wsi_filepath}") + self.v(f"Input WSI: {wsi_filepath}") tils_score_writer = rw.TilsScoreWriter(Path(out_data.abspath)) tils_score = tils_pipeline.run_tils_pipeline(wsi_mri) From 19d64c48c2f70d0801cee2499d5c891844afeedf Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 29 Jun 2023 14:22:13 +0200 Subject: [PATCH 03/24] cleanup ASAP install dockerfile line and add matching torchvision versions to torch install --- models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile | 8 +++----- models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile | 8 +++----- models/tiger_lb2/dockerfiles/nocuda/Dockerfile | 8 +++----- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile b/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile index eb0ea99f..95d4d8b6 100644 --- a/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile +++ b/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile @@ -11,11 +11,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ RUN pip3 install --no-cache-dir panimg # install required dependencies for algorithm -RUN pip3 install --no-cache-dir torch==1.10.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html +RUN pip3 install --no-cache-dir torch==1.10.0+cu113 torchvision==0.11.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html # Install ASAP -RUN : \ - && apt-get update \ +RUN apt-get update \ && apt-get -y install curl libpython3.8-dev \ && curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ && dpkg --install 
ASAP-2.1-py38-Ubuntu2004.deb || true \ @@ -23,8 +22,7 @@ RUN : \ && ldconfig -v \ && apt-get clean \ && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ - && rm ASAP-2.1-py38-Ubuntu2004.deb \ - && : + && rm ASAP-2.1-py38-Ubuntu2004.deb # Install tiger LB2 algorithm # - We use a shallow git clone for reduced bandwidth usage diff --git a/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile b/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile index a0eec858..837a9fdd 100644 --- a/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile +++ b/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile @@ -11,11 +11,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ RUN pip3 install --no-cache-dir panimg # install required dependencies for algorithm -RUN pip3 install --no-cache-dir torch==2.0.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html +RUN pip3 install --no-cache-dir torch==2.0.1+cu118 torchvision==0.15.2+cu118 -f https://download.pytorch.org/whl/torch_stable.html # Install ASAP -RUN : \ - && apt-get update \ +RUN apt-get update \ && apt-get -y install curl libpython3.8-dev \ && curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ && dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true \ @@ -23,8 +22,7 @@ RUN : \ && ldconfig -v \ && apt-get clean \ && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ - && rm ASAP-2.1-py38-Ubuntu2004.deb \ - && : + && rm ASAP-2.1-py38-Ubuntu2004.deb # Install tiger LB2 algorithm # - We use a shallow git clone for reduced bandwidth usage diff --git a/models/tiger_lb2/dockerfiles/nocuda/Dockerfile b/models/tiger_lb2/dockerfiles/nocuda/Dockerfile index b2d30b39..ffdb6412 100644 --- a/models/tiger_lb2/dockerfiles/nocuda/Dockerfile +++ b/models/tiger_lb2/dockerfiles/nocuda/Dockerfile @@ -11,11 +11,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ RUN pip3 install --no-cache-dir panimg # install required dependencies for algorithm (CPU-only) -RUN pip3 install --no-cache-dir torch==1.10.0+cpu -f https://download.pytorch.org/whl/torch_stable.html +RUN pip3 install --no-cache-dir torch==1.10.0+cpu torchvision==0.11.0+cpu -f https://download.pytorch.org/whl/torch_stable.html # Install ASAP -RUN : \ - && apt-get update \ +RUN apt-get update \ && apt-get -y install curl libpython3.8-dev \ && curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ && dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true \ @@ -23,8 +22,7 @@ RUN : \ && ldconfig -v \ && apt-get clean \ && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ - && rm ASAP-2.1-py38-Ubuntu2004.deb \ - && : + && rm ASAP-2.1-py38-Ubuntu2004.deb # Install tiger LB2 algorithm # - We use a shallow git clone for reduced bandwidth usage From 21e073bd55148515b1874b26b0f5a293311894a1 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 25 Jul 2023 23:28:14 +0200 Subject: [PATCH 04/24] moved tiger_lb2 -> gc_tiger_lb2, changed Dockerfile to new base img, fixed model weights install, updated pipelines --- .../{tiger_lb2 => gc_tiger_lb2}/__init__.py | 0 models/gc_tiger_lb2/config/default.yml | 24 ++++++++ models/gc_tiger_lb2/config/tiff_pipeline.yml | 21 +++++++ .../dockerfiles}/Dockerfile | 27 ++++----- .../scripts/run.py | 28 ++++------ .../gc_tiger_lb2/scripts/run_tiff_pipeline.py | 37 +++++++++++++ .../utils/PanImgConverters.py | 0 
.../utils/TigerLB2Runner.py | 4 +- .../utils/__init__.py | 0 models/tiger_lb2/config/config.yml | 13 ----- .../tiger_lb2/dockerfiles/cuda11.4/Dockerfile | 51 ----------------- .../tiger_lb2/dockerfiles/nocuda/Dockerfile | 55 ------------------- models/tiger_lb2/utils/TiffImporter.py | 50 ----------------- 13 files changed, 109 insertions(+), 201 deletions(-) rename models/{tiger_lb2 => gc_tiger_lb2}/__init__.py (100%) create mode 100644 models/gc_tiger_lb2/config/default.yml create mode 100644 models/gc_tiger_lb2/config/tiff_pipeline.yml rename models/{tiger_lb2/dockerfiles/cuda12.0 => gc_tiger_lb2/dockerfiles}/Dockerfile (65%) rename models/{tiger_lb2 => gc_tiger_lb2}/scripts/run.py (56%) create mode 100644 models/gc_tiger_lb2/scripts/run_tiff_pipeline.py rename models/{tiger_lb2 => gc_tiger_lb2}/utils/PanImgConverters.py (100%) rename models/{tiger_lb2 => gc_tiger_lb2}/utils/TigerLB2Runner.py (84%) rename models/{tiger_lb2 => gc_tiger_lb2}/utils/__init__.py (100%) delete mode 100644 models/tiger_lb2/config/config.yml delete mode 100644 models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile delete mode 100644 models/tiger_lb2/dockerfiles/nocuda/Dockerfile delete mode 100644 models/tiger_lb2/utils/TiffImporter.py diff --git a/models/tiger_lb2/__init__.py b/models/gc_tiger_lb2/__init__.py similarity index 100% rename from models/tiger_lb2/__init__.py rename to models/gc_tiger_lb2/__init__.py diff --git a/models/gc_tiger_lb2/config/default.yml b/models/gc_tiger_lb2/config/default.yml new file mode 100644 index 00000000..e4552c28 --- /dev/null +++ b/models/gc_tiger_lb2/config/default.yml @@ -0,0 +1,24 @@ +general: + data_base_dir: /app/data + version: 1.0 + description: Tiger challenge winner LB2 (dicom:sm to json with Tils score) + +execute: +- DicomImporter +- TiffPanImgConverter +- TigerLB2Runner +- DataOrganizer + +modules: + DicomImporter: + source_dir: input_data + import_dir: sorted_data + sort_data: True + meta: + mod: sm + + DataOrganizer: + target_dir: output_data + require_data_confirmation: true + targets: + - json-->[i:sid]/gc_tiger_lb2_tils_score.json diff --git a/models/gc_tiger_lb2/config/tiff_pipeline.yml b/models/gc_tiger_lb2/config/tiff_pipeline.yml new file mode 100644 index 00000000..4e81857a --- /dev/null +++ b/models/gc_tiger_lb2/config/tiff_pipeline.yml @@ -0,0 +1,21 @@ +general: + data_base_dir: /app/data + version: 1.0 + description: Tiger challenge winner LB2 (tiff:sm to json with Tils score) + +execute: +- FileStructureImporter +- TigerLB2Runner +- DataOrganizer + +modules: + FileStructureImporter: + input_dir: input_data + structures: + - $instanceID@instance/wsi.tif@tiff:mod=sm + + DataOrganizer: + target_dir: output_data + require_data_confirmation: true + targets: + - json-->[i:instanceID]/gc_tiger_lb2_tils_score.json diff --git a/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile b/models/gc_tiger_lb2/dockerfiles/Dockerfile similarity index 65% rename from models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile rename to models/gc_tiger_lb2/dockerfiles/Dockerfile index 837a9fdd..d3606fb4 100644 --- a/models/tiger_lb2/dockerfiles/cuda12.0/Dockerfile +++ b/models/gc_tiger_lb2/dockerfiles/Dockerfile @@ -1,15 +1,9 @@ # Specify the base image for the environment -FROM mhubai/base:cuda12.0 +FROM mhubai/base:latest # Specify/override authors label LABEL authors="sil.vandeleemput@radboudumc.nl" -# Install panimg to test conversion integration TODO should later be installed with MHub/mhubio -RUN apt-get update && apt-get install -y --no-install-recommends \ - python3-openslide \ - && 
rm -rf /var/lib/apt/lists/* -RUN pip3 install --no-cache-dir panimg - # install required dependencies for algorithm RUN pip3 install --no-cache-dir torch==2.0.1+cu118 torchvision==0.15.2+cu118 -f https://download.pytorch.org/whl/torch_stable.html @@ -36,16 +30,23 @@ RUN pip3 install --no-cache-dir -r /vuno/requirements.txt # Reinstall correct version of Numpy to function with ASAP 2.1 RUN pip install --no-cache-dir --force-reinstall numpy==1.22 -# Clone MHub model (m-tiger-lb2 branch, fixed to commit TODO) +# Download and install model weights file from zenodo +# RUN apt update && apt install bsdtar && rm -rf /var/lib/apt/lists/* +RUN rm -rf /vuno/pretrained_weights && \ + wget https://zenodo.org/record/8112176/files/pretrained_weights.zip -O /vuno/pretrained_weights.zip && \ + unzip /vuno/pretrained_weights.zip -d /vuno && \ + rm /vuno/pretrained_weights.zip + +# Clone MHub model (m-gc-tiger-lb2 branch, fixed to commit TODO) #RUN git init \ -# && git sparse-checkout set "models/tiger_lb2" \ -# && git fetch https://github.com/MHubAI/models.git m-tiger-lb2 \ +# && git sparse-checkout set "models/gc_tiger_lb2" \ +# && git fetch https://github.com/MHubAI/models.git m-gc-tiger-lb2 \ # && git merge TODO # Add algorithm code base and model code bases to python path -# NOTE yolov5 and /app both have a models directory in their folders which causes import conflicts +# FIXME yolov5 and /app both have a models directory in their folders which causes import conflicts # to solve this we add /app/models instead of /app to the python path ENV PYTHONPATH="/vuno:/app/models:/usr/local/lib/python3.8/dist-packages/yolov5/" -# Default run script -CMD ["python3", "/app/models/tiger_lb2/scripts/run.py"] +# Default run script (cannot use configuration yml file due to import conflicts...) 
+CMD ["python3", "/app/models/gc_tiger_lb2/scripts/run.py"] diff --git a/models/tiger_lb2/scripts/run.py b/models/gc_tiger_lb2/scripts/run.py similarity index 56% rename from models/tiger_lb2/scripts/run.py rename to models/gc_tiger_lb2/scripts/run.py index 956631ee..cda94d0e 100644 --- a/models/tiger_lb2/scripts/run.py +++ b/models/gc_tiger_lb2/scripts/run.py @@ -1,6 +1,7 @@ """ ------------------------------------------------------- MHub / DIAG - Run HookNet Lung Segmentation Model + WSI Dicom input variant ------------------------------------------------------- ------------------------------------------------------- @@ -14,9 +15,9 @@ from mhubio.core import Config, DataType, FileType from mhubio.modules.organizer.DataOrganizer import DataOrganizer -from tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner -from tiger_lb2.utils.PanImgConverters import TiffPanImgConverter -from tiger_lb2.utils.TiffImporter import TiffImporter +from mhubio.modules.importer.FileStructureImporter import FileStructureImporter +from gc_tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner +from gc_tiger_lb2.utils.PanImgConverters import TiffPanImgConverter # clean-up import shutil @@ -25,23 +26,16 @@ shutil.rmtree("/app/data/output_data", ignore_errors=True) # config -config = Config('/app/models/tiger_lb2/config/config.yml') +config = Config('/app/models/gc_tiger_lb2/config/default.yml') -# TODO could be WSI Dicom input alternatively +# import (sm:dicom) +DicomImporter(config).execute() -# import (wsi:dicom) -# DicomImporter(config).execute() +# convert (sm:dicom -> sm:tiff) +TiffPanImgConverter(config).execute() -# convert (wsi:dicom -> wsi:tiff) -# TiffPanImgConverter(config).execute() - -# import (wsi:tiff) -TiffImporter(config).execute() - -# execute model (wsi:tiff -> json) +# execute model (sm:tiff -> json) TigerLB2Runner(config).execute() # organize data into output folder -organizer = DataOrganizer(config, set_file_permissions=sys.platform.startswith('linux')) -organizer.setTarget(DataType(FileType.JSON), "/app/data/output_data/[i:sid]/tiger_lb2_tils_score.json") -organizer.execute() +organizer = DataOrganizer(config, set_file_permissions=sys.platform.startswith('linux')).execute() diff --git a/models/gc_tiger_lb2/scripts/run_tiff_pipeline.py b/models/gc_tiger_lb2/scripts/run_tiff_pipeline.py new file mode 100644 index 00000000..e72d0e92 --- /dev/null +++ b/models/gc_tiger_lb2/scripts/run_tiff_pipeline.py @@ -0,0 +1,37 @@ +""" +------------------------------------------------------- +MHub / DIAG - Run HookNet Lung Segmentation Model + Tiff input variant +------------------------------------------------------- + +------------------------------------------------------- +Author: Sil van de Leemput +Email: sil.vandeleemput@radboudumc.nl +------------------------------------------------------- +""" + +import sys +sys.path.append('.') + +from mhubio.core import Config, DataType, FileType +from mhubio.modules.organizer.DataOrganizer import DataOrganizer +from mhubio.modules.importer.FileStructureImporter import FileStructureImporter +from gc_tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner + +# clean-up +import shutil +shutil.rmtree("/app/data/sorted_data", ignore_errors=True) +shutil.rmtree("/app/tmp", ignore_errors=True) +shutil.rmtree("/app/data/output_data", ignore_errors=True) + +# config +config = Config('/app/models/gc_tiger_lb2/config/tiff_pipeline.yml') + +# import (sm:tiff) +FileStructureImporter(config).execute() + +# execute model (sm:tiff -> json) +TigerLB2Runner(config).execute() + +# 
organize data into output folder +organizer = DataOrganizer(config, set_file_permissions=sys.platform.startswith('linux')).execute() diff --git a/models/tiger_lb2/utils/PanImgConverters.py b/models/gc_tiger_lb2/utils/PanImgConverters.py similarity index 100% rename from models/tiger_lb2/utils/PanImgConverters.py rename to models/gc_tiger_lb2/utils/PanImgConverters.py diff --git a/models/tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py similarity index 84% rename from models/tiger_lb2/utils/TigerLB2Runner.py rename to models/gc_tiger_lb2/utils/TigerLB2Runner.py index c3360e04..925e37fb 100644 --- a/models/tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -21,8 +21,8 @@ class TigerLB2Runner(Module): @IO.Instance() - @IO.Input('in_data', 'tiff', the='input whole slide image Tiff') - @IO.Output('out_data', 'tiger_lb2_tils_score.json', 'json:model=TigerLB2TilsScore', 'in_data', the='TIGER LB2 Tils score') + @IO.Input('in_data', 'tiff:mod=sm', the='input whole slide image Tiff') + @IO.Output('out_data', 'gc_tiger_lb2_tils_score.json', 'json:model=TigerLB2TilsScore', 'in_data', the='TIGER LB2 Tils score') def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData) -> None: wsi_filepath = Path(in_data.abspath) wsi_mri = rw.open_multiresolutionimage_image(wsi_filepath) diff --git a/models/tiger_lb2/utils/__init__.py b/models/gc_tiger_lb2/utils/__init__.py similarity index 100% rename from models/tiger_lb2/utils/__init__.py rename to models/gc_tiger_lb2/utils/__init__.py diff --git a/models/tiger_lb2/config/config.yml b/models/tiger_lb2/config/config.yml deleted file mode 100644 index 90396b3a..00000000 --- a/models/tiger_lb2/config/config.yml +++ /dev/null @@ -1,13 +0,0 @@ -general: - data_base_dir: /app/data - -modules: - DicomImporter: - source_dir: input_data - import_dir: sorted_data - sort_data: True - meta: - mod: ct - - TiffImporter: - source_dir: input_data diff --git a/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile b/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile deleted file mode 100644 index 95d4d8b6..00000000 --- a/models/tiger_lb2/dockerfiles/cuda11.4/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# Specify the base image for the environment -FROM mhubai/base:cuda11.4 - -# Specify/override authors label -LABEL authors="sil.vandeleemput@radboudumc.nl" - -# Install panimg to test conversion integration TODO should later be installed with MHub/mhubio -RUN apt-get update && apt-get install -y --no-install-recommends \ - python3-openslide \ - && rm -rf /var/lib/apt/lists/* -RUN pip3 install --no-cache-dir panimg - -# install required dependencies for algorithm -RUN pip3 install --no-cache-dir torch==1.10.0+cu113 torchvision==0.11.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html - -# Install ASAP -RUN apt-get update \ - && apt-get -y install curl libpython3.8-dev \ - && curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ - && dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true \ - && apt-get -f install --fix-missing --fix-broken --assume-yes \ - && ldconfig -v \ - && apt-get clean \ - && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ - && rm ASAP-2.1-py38-Ubuntu2004.deb - -# Install tiger LB2 algorithm -# - We use a shallow git clone for reduced bandwidth usage -# - Subsequently we remove the .git directory to procuce a compacter docker layer -RUN git clone --depth 1 
https://github.com/vuno/tiger_challenge.git /vuno \ - && rm -rf /vuno/.git - -# Install tiger LB2 dependencies -RUN pip3 install --no-cache-dir -r /vuno/requirements.txt - -# Reinstall correct version of Numpy to function with ASAP 2.1 -RUN pip install --no-cache-dir --force-reinstall numpy==1.22 - -# Clone MHub model (m-tiger-lb2 branch, fixed to commit TODO) -#RUN git init \ -# && git sparse-checkout set "models/tiger_lb2" \ -# && git fetch https://github.com/MHubAI/models.git m-tiger-lb2 \ -# && git merge TODO - -# Add algorithm code base and model code bases to python path -# NOTE yolov5 and /app both have a models directory in their folders which causes import conflicts -# to solve this we add /app/models instead of /app to the python path -ENV PYTHONPATH="/vuno:/app/models:/usr/local/lib/python3.8/dist-packages/yolov5/" - -# Default run script -CMD ["python3", "/app/models/tiger_lb2/scripts/run.py"] diff --git a/models/tiger_lb2/dockerfiles/nocuda/Dockerfile b/models/tiger_lb2/dockerfiles/nocuda/Dockerfile deleted file mode 100644 index ffdb6412..00000000 --- a/models/tiger_lb2/dockerfiles/nocuda/Dockerfile +++ /dev/null @@ -1,55 +0,0 @@ -# Specify the base image for the environment -FROM mhubai/base:nocuda - -# Specify/override authors label -LABEL authors="sil.vandeleemput@radboudumc.nl" - -# Install panimg to test conversion integration TODO should later be installed with MHub/mhubio -RUN apt-get update && apt-get install -y --no-install-recommends \ - python3-openslide \ - && rm -rf /var/lib/apt/lists/* -RUN pip3 install --no-cache-dir panimg - -# install required dependencies for algorithm (CPU-only) -RUN pip3 install --no-cache-dir torch==1.10.0+cpu torchvision==0.11.0+cpu -f https://download.pytorch.org/whl/torch_stable.html - -# Install ASAP -RUN apt-get update \ - && apt-get -y install curl libpython3.8-dev \ - && curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ - && dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true \ - && apt-get -f install --fix-missing --fix-broken --assume-yes \ - && ldconfig -v \ - && apt-get clean \ - && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ - && rm ASAP-2.1-py38-Ubuntu2004.deb - -# Install tiger LB2 algorithm -# - We use a shallow git clone for reduced bandwidth usage -# - Subsequently we remove the .git directory to procuce a compacter docker layer -RUN git clone --depth 1 https://github.com/vuno/tiger_challenge.git /vuno \ - && rm -rf /vuno/.git - -# Modify model loading code to load model weights to the cpu instead of the gpu -RUN sed -i 's/cuda:0/cpu/g' /vuno/detection/detection_inference.py \ - && sed -i 's/torch.load(str(ckpt_fname))/torch.load(str(ckpt_fname), map_location="cpu")/g' /vuno/segmentation/inference.py - -# Install tiger LB2 dependencies -RUN pip3 install --no-cache-dir -r /vuno/requirements.txt - -# Reinstall correct version of Numpy to function with ASAP 2.1 -RUN pip install --no-cache-dir --force-reinstall numpy==1.22 - -# Clone MHub model (m-tiger-lb2 branch, fixed to commit TODO) -#RUN git init \ -# && git sparse-checkout set "models/tiger_lb2" \ -# && git fetch https://github.com/MHubAI/models.git m-tiger-lb2 \ -# && git merge TODO - -# Add algorithm code base and model code bases to python path -# NOTE yolov5 and /app both have a models directory in their folders which causes import conflicts -# to solve this we add /app/models instead of /app to the python path -ENV 
PYTHONPATH="/vuno:/app/models:/usr/local/lib/python3.8/dist-packages/yolov5/" - -# Default run script -CMD ["python3", "/app/models/tiger_lb2/scripts/run.py"] diff --git a/models/tiger_lb2/utils/TiffImporter.py b/models/tiger_lb2/utils/TiffImporter.py deleted file mode 100644 index 86bd38d5..00000000 --- a/models/tiger_lb2/utils/TiffImporter.py +++ /dev/null @@ -1,50 +0,0 @@ -""" --------------------------------------- -MHub / DIAG - Tiff importer --------------------------------------- - --------------------------------------- -Author: Sil van de Leemput -Email: sil.vandeleemput@radboudumc.nl --------------------------------------- -""" -import os -from typing import Optional -from pathlib import Path - -from mhubio.modules.importer.DataImporter import IDEF, DataImporter, FileType -from mhubio.core import Meta, DirectoryChain - - -# TODO should be moved to mhubio/core/templates.py -WSI = Meta(mod="wsi") - - -class TiffImporter(DataImporter): - def task(self) -> None: - source_dir = self.c['source_dir'] - source_dc = DirectoryChain(path=source_dir, parent=self.config.data.dc) - # input tiff file directory - input_dir = source_dc.abspath - self.v(f"{input_dir}") - - # add input tiff files as WSI images... - self.setBasePath(input_dir) - for input_tiff_file in Path(input_dir).glob("*.tif"): - self.v(f"{input_tiff_file}") - self.addTiffWSI(str(input_tiff_file), ref=None) - - # let the base module take over from here - super().task() - - def addTiffWSI(self, path: str, ref: Optional[str] = None) -> None: - _path = self._resolvePath(path, ref) - self.v("adding wsi in tiff format with resolved path: ", _path) - assert os.path.isfile(_path) and _path.endswith('.tif'), f"Expect existing tiff file, '{_path}' was given instead." - self._import_paths.append(IDEF( - ref = ref, - path = path, - ftype = FileType.TIFF, - meta = WSI - )) - From 7c4fafd667a3ee0b244a43b2cb0c68c8b638286a Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 25 Jul 2023 23:35:15 +0200 Subject: [PATCH 05/24] add DicomImporter import to run.py --- models/gc_tiger_lb2/scripts/run.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/gc_tiger_lb2/scripts/run.py b/models/gc_tiger_lb2/scripts/run.py index cda94d0e..5355f8b9 100644 --- a/models/gc_tiger_lb2/scripts/run.py +++ b/models/gc_tiger_lb2/scripts/run.py @@ -15,7 +15,7 @@ from mhubio.core import Config, DataType, FileType from mhubio.modules.organizer.DataOrganizer import DataOrganizer -from mhubio.modules.importer.FileStructureImporter import FileStructureImporter +from mhubio.modules.importer.DicomImporter import DicomImporter from gc_tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner from gc_tiger_lb2.utils.PanImgConverters import TiffPanImgConverter From 292a5580739ebef49cd7ec9ae691e32814866e7e Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 25 Jul 2023 23:35:50 +0200 Subject: [PATCH 06/24] add torch cuda assert to tiger_lb2 runner --- models/gc_tiger_lb2/utils/TigerLB2Runner.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index 925e37fb..33df9073 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -17,6 +17,8 @@ import pipeline.tils_pipeline as tils_pipeline import algorithm.rw as rw +import torch + class TigerLB2Runner(Module): @@ -24,6 +26,8 @@ class TigerLB2Runner(Module): @IO.Input('in_data', 'tiff:mod=sm', the='input whole slide image Tiff') @IO.Output('out_data', 
'gc_tiger_lb2_tils_score.json', 'json:model=TigerLB2TilsScore', 'in_data', the='TIGER LB2 Tils score') def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData) -> None: + assert torch.cuda.is_available(), "Error: TigerLB2Runner requires CUDA to be available!" + wsi_filepath = Path(in_data.abspath) wsi_mri = rw.open_multiresolutionimage_image(wsi_filepath) From 667454b03fe72d98c3fdecb4ad3d81260aec9e11 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Wed, 26 Jul 2023 22:20:58 +0200 Subject: [PATCH 07/24] Added a fix for the import issue by introducing a CLI and a subprocess call from the pipeline * Fix for the import issue: * Implemented a CLI for the Tiger lb2 runner that takes an input TIFF path and output JSON file path * Integrated the CLI inside the TigerLB2Runner as a subprocess call to the CLI through a fresh Python interpreter * Cleaned the Dockerfile some more * Caveats: * Because of the fix the extra run script for the tiff_pipeline became redundant, can be called directly from the tiff_pipeline.yml through a call of mhubio.run * For the main pipeline the run.py is still required since the TiffPanImgConverter is not automatically picked up * Updated the PanImgConverters, since the returned UnconsumedFilesException e.errors were renamed to e.file_errors --- models/gc_tiger_lb2/dockerfiles/Dockerfile | 9 ++-- models/gc_tiger_lb2/scripts/run.py | 4 +- .../gc_tiger_lb2/scripts/run_tiff_pipeline.py | 37 -------------- models/gc_tiger_lb2/scripts/tiger_lb2_cli.py | 48 +++++++++++++++++++ models/gc_tiger_lb2/utils/PanImgConverters.py | 10 ++-- models/gc_tiger_lb2/utils/TigerLB2Runner.py | 29 ++++++----- models/gc_tiger_lb2/utils/__init__.py | 3 +- 7 files changed, 74 insertions(+), 66 deletions(-) delete mode 100644 models/gc_tiger_lb2/scripts/run_tiff_pipeline.py create mode 100644 models/gc_tiger_lb2/scripts/tiger_lb2_cli.py diff --git a/models/gc_tiger_lb2/dockerfiles/Dockerfile b/models/gc_tiger_lb2/dockerfiles/Dockerfile index d3606fb4..14d7f03e 100644 --- a/models/gc_tiger_lb2/dockerfiles/Dockerfile +++ b/models/gc_tiger_lb2/dockerfiles/Dockerfile @@ -31,7 +31,6 @@ RUN pip3 install --no-cache-dir -r /vuno/requirements.txt RUN pip install --no-cache-dir --force-reinstall numpy==1.22 # Download and install model weights file from zenodo -# RUN apt update && apt install bsdtar && rm -rf /var/lib/apt/lists/* RUN rm -rf /vuno/pretrained_weights && \ wget https://zenodo.org/record/8112176/files/pretrained_weights.zip -O /vuno/pretrained_weights.zip && \ unzip /vuno/pretrained_weights.zip -d /vuno && \ @@ -43,10 +42,8 @@ RUN rm -rf /vuno/pretrained_weights && \ # && git fetch https://github.com/MHubAI/models.git m-gc-tiger-lb2 \ # && git merge TODO -# Add algorithm code base and model code bases to python path -# FIXME yolov5 and /app both have a models directory in their folders which causes import conflicts -# to solve this we add /app/models instead of /app to the python path -ENV PYTHONPATH="/vuno:/app/models:/usr/local/lib/python3.8/dist-packages/yolov5/" +# Add model and algorithm code bases to python path +ENV PYTHONPATH="/vuno:/app" -# Default run script (cannot use configuration yml file due to import conflicts...) +# Default run script (cannot use configuration yml file due to panimg converter not ...) 
CMD ["python3", "/app/models/gc_tiger_lb2/scripts/run.py"] diff --git a/models/gc_tiger_lb2/scripts/run.py b/models/gc_tiger_lb2/scripts/run.py index 5355f8b9..fdb5933d 100644 --- a/models/gc_tiger_lb2/scripts/run.py +++ b/models/gc_tiger_lb2/scripts/run.py @@ -16,8 +16,8 @@ from mhubio.core import Config, DataType, FileType from mhubio.modules.organizer.DataOrganizer import DataOrganizer from mhubio.modules.importer.DicomImporter import DicomImporter -from gc_tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner -from gc_tiger_lb2.utils.PanImgConverters import TiffPanImgConverter +from models.gc_tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner +from models.gc_tiger_lb2.utils.PanImgConverters import TiffPanImgConverter # clean-up import shutil diff --git a/models/gc_tiger_lb2/scripts/run_tiff_pipeline.py b/models/gc_tiger_lb2/scripts/run_tiff_pipeline.py deleted file mode 100644 index e72d0e92..00000000 --- a/models/gc_tiger_lb2/scripts/run_tiff_pipeline.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -------------------------------------------------------- -MHub / DIAG - Run HookNet Lung Segmentation Model - Tiff input variant -------------------------------------------------------- - -------------------------------------------------------- -Author: Sil van de Leemput -Email: sil.vandeleemput@radboudumc.nl -------------------------------------------------------- -""" - -import sys -sys.path.append('.') - -from mhubio.core import Config, DataType, FileType -from mhubio.modules.organizer.DataOrganizer import DataOrganizer -from mhubio.modules.importer.FileStructureImporter import FileStructureImporter -from gc_tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner - -# clean-up -import shutil -shutil.rmtree("/app/data/sorted_data", ignore_errors=True) -shutil.rmtree("/app/tmp", ignore_errors=True) -shutil.rmtree("/app/data/output_data", ignore_errors=True) - -# config -config = Config('/app/models/gc_tiger_lb2/config/tiff_pipeline.yml') - -# import (sm:tiff) -FileStructureImporter(config).execute() - -# execute model (sm:tiff -> json) -TigerLB2Runner(config).execute() - -# organize data into output folder -organizer = DataOrganizer(config, set_file_permissions=sys.platform.startswith('linux')).execute() diff --git a/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py b/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py new file mode 100644 index 00000000..2771696f --- /dev/null +++ b/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py @@ -0,0 +1,48 @@ +""" +------------------------------------------------ +Mhub / DIAG - CLI Run script for the TIGER LB2 +------------------------------------------------ + +------------------------------------------------ +Author: Sil van de Leemput +Email: sil.vandeleemput@radboudumc.nl +------------------------------------------------ +""" + +import argparse +from pathlib import Path + +import pipeline.tils_pipeline as tils_pipeline +import algorithm.rw as rw + +import torch + + +def tiger_lb2_cli() -> None: + parser = argparse.ArgumentParser("Tiger LB2 Run CLI") + parser.add_argument("input_file", type=str, help="Input WSI TIFF file path") + parser.add_argument("output_file", type=str, help="Output JSON file path") + args = parser.parse_args() + run_tiger_lb2( + wsi_filepath=Path(args.input_file), + output_json_file=Path(args.output_file) + ) + + +def run_tiger_lb2(wsi_filepath: Path, output_json_file: Path) -> None: + if not torch.cuda.is_available(): + raise RuntimeError("run_tiger_lb2 requires CUDA to be available!") + + print(f"Input WSI: {wsi_filepath}") + wsi_mri = 
rw.open_multiresolutionimage_image(wsi_filepath) + + tils_score_writer = rw.TilsScoreWriter(output_json_file) + tils_score = tils_pipeline.run_tils_pipeline(wsi_mri) + + print(f"Writing tils score to {output_json_file}") + tils_score_writer.set_tils_score(tils_score=tils_score) + tils_score_writer.save() + + +if __name__ == "__main__": + tiger_lb2_cli() diff --git a/models/gc_tiger_lb2/utils/PanImgConverters.py b/models/gc_tiger_lb2/utils/PanImgConverters.py index 25dd618e..21b91f5d 100644 --- a/models/gc_tiger_lb2/utils/PanImgConverters.py +++ b/models/gc_tiger_lb2/utils/PanImgConverters.py @@ -68,10 +68,10 @@ def convert(self, instance: Instance) -> Optional[InstanceData]: sitk_image = result.image # SimpleITK image SimpleITK.WriteImage(sitk_image, str(out_mha_file)) except UnconsumedFilesException as e: - # e.errors is keyed with a Path to a file that could not be consumed, + # e.file_errors is keyed with a Path to a file that could not be consumed, # with a list of all the errors found with loading it, # the user can then choose what to do with that information - print("CONVERT ERROR: UnconsumedFilesException during PanImg conversion: ", e.errors) + print("CONVERT ERROR: UnconsumedFilesException during PanImg conversion: ", e.file_errors) return None return mha_data @@ -107,16 +107,16 @@ def convert(self, instance: Instance) -> Optional[InstanceData]: else: # run conversion using panimg dcm_input_files = {f for f in inp_dicom_dir.glob("*.dcm") if f.is_file()} - + print(f"Running WSI DICOM -> TIFF conversion on {len(dcm_input_files)} dcm files") try: for result in image_builder_tiff(files=dcm_input_files): tiff_image = result.file # Path to the tiff file shutil.move(str(tiff_image), str(out_tiff_file)) except UnconsumedFilesException as e: - # e.errors is keyed with a Path to a file that could not be consumed, + # e.file_errors is keyed with a Path to a file that could not be consumed, # with a list of all the errors found with loading it, # the user can then choose what to do with that information - print("CONVERT ERROR: UnconsumedFilesException during PanImg conversion: ", e.errors) + print("CONVERT ERROR: UnconsumedFilesException during PanImg conversion: ", e.file_errors) return None return tiff_data diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index 33df9073..10a95b04 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -13,30 +13,29 @@ from pathlib import Path import numpy as np import SimpleITK as sitk - -import pipeline.tils_pipeline as tils_pipeline -import algorithm.rw as rw - import torch +import subprocess as sp +import sys + class TigerLB2Runner(Module): + CLI_SCRIPT_PATH = Path(__file__).parent.parent / "scripts" / "tiger_lb2_cli.py" + @IO.Instance() @IO.Input('in_data', 'tiff:mod=sm', the='input whole slide image Tiff') @IO.Output('out_data', 'gc_tiger_lb2_tils_score.json', 'json:model=TigerLB2TilsScore', 'in_data', the='TIGER LB2 Tils score') def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData) -> None: assert torch.cuda.is_available(), "Error: TigerLB2Runner requires CUDA to be available!" 
- wsi_filepath = Path(in_data.abspath) - wsi_mri = rw.open_multiresolutionimage_image(wsi_filepath) - - self.v(f"Input WSI: {wsi_filepath}") - - tils_score_writer = rw.TilsScoreWriter(Path(out_data.abspath)) - tils_score = tils_pipeline.run_tils_pipeline(wsi_mri) + proc = sp.run( + [ + sys.executable, + str(self.CLI_SCRIPT_PATH), + in_data.abspath, + out_data.abspath, + ] + ) - # write tils score - self.v(f"Writing tils score to {out_data.abspath}") - tils_score_writer.set_tils_score(tils_score=tils_score) - tils_score_writer.save() + assert proc.returncode == 0, f"Something went wrong when calling {self.CLI_SCRIPT_PATH}, got return code: {proc.returncode}" diff --git a/models/gc_tiger_lb2/utils/__init__.py b/models/gc_tiger_lb2/utils/__init__.py index 722a52e3..03fd3f6f 100644 --- a/models/gc_tiger_lb2/utils/__init__.py +++ b/models/gc_tiger_lb2/utils/__init__.py @@ -1 +1,2 @@ -from .TigerLB2Runner import * \ No newline at end of file +from .PanImgConverters import * +from .TigerLB2Runner import * From 0cffac0df0f2c22b8e19253139a8a89af21037db Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Wed, 13 Sep 2023 20:51:13 +0200 Subject: [PATCH 08/24] Update and cleanup the tiger lb2 algorithm model code Dockerfile - fixed to commit for tiger_challenge repo - updated MHubAI/models clone stub - updated entrypoint pipeline default.yml - updated the outputs - use the TiffConverter - Added reportexporter template runner: - Added TIL Score as ValueOutput - Confirmed data output - Added/updated comments removed: - custom panimg converter classes - run.py script to run the pipeline --- models/gc_tiger_lb2/config/default.yml | 12 +- models/gc_tiger_lb2/config/tiff_pipeline.yml | 4 +- models/gc_tiger_lb2/dockerfiles/Dockerfile | 20 +-- models/gc_tiger_lb2/scripts/run.py | 41 ------ models/gc_tiger_lb2/scripts/tiger_lb2_cli.py | 16 ++- models/gc_tiger_lb2/utils/PanImgConverters.py | 122 ------------------ models/gc_tiger_lb2/utils/TigerLB2Runner.py | 24 +++- models/gc_tiger_lb2/utils/__init__.py | 1 - 8 files changed, 52 insertions(+), 188 deletions(-) delete mode 100644 models/gc_tiger_lb2/scripts/run.py delete mode 100644 models/gc_tiger_lb2/utils/PanImgConverters.py diff --git a/models/gc_tiger_lb2/config/default.yml b/models/gc_tiger_lb2/config/default.yml index e4552c28..89699456 100644 --- a/models/gc_tiger_lb2/config/default.yml +++ b/models/gc_tiger_lb2/config/default.yml @@ -1,11 +1,11 @@ general: data_base_dir: /app/data version: 1.0 - description: Tiger challenge winner LB2 (dicom:sm to json with Tils score) + description: Tiger challenge winner LB2 (dicom:sm to json with TIL score) execute: - DicomImporter -- TiffPanImgConverter +- TiffConverter - TigerLB2Runner - DataOrganizer @@ -17,8 +17,14 @@ modules: meta: mod: sm + ReportExporter: + includes: + - data: til_score + label: TIL score + value: value + DataOrganizer: target_dir: output_data require_data_confirmation: true targets: - - json-->[i:sid]/gc_tiger_lb2_tils_score.json + - json-->[i:sid]/gc_tiger_lb2_til_score.json diff --git a/models/gc_tiger_lb2/config/tiff_pipeline.yml b/models/gc_tiger_lb2/config/tiff_pipeline.yml index 4e81857a..559dba89 100644 --- a/models/gc_tiger_lb2/config/tiff_pipeline.yml +++ b/models/gc_tiger_lb2/config/tiff_pipeline.yml @@ -1,7 +1,7 @@ general: data_base_dir: /app/data version: 1.0 - description: Tiger challenge winner LB2 (tiff:sm to json with Tils score) + description: Tiger challenge winner LB2 (tiff:sm to json with TIL score) execute: - FileStructureImporter @@ -18,4 +18,4 @@ modules: target_dir: 
output_data require_data_confirmation: true targets: - - json-->[i:instanceID]/gc_tiger_lb2_tils_score.json + - json-->[i:instanceID]/gc_tiger_lb2_til_score.json diff --git a/models/gc_tiger_lb2/dockerfiles/Dockerfile b/models/gc_tiger_lb2/dockerfiles/Dockerfile index 14d7f03e..05efeb8a 100644 --- a/models/gc_tiger_lb2/dockerfiles/Dockerfile +++ b/models/gc_tiger_lb2/dockerfiles/Dockerfile @@ -19,10 +19,11 @@ RUN apt-get update \ && rm ASAP-2.1-py38-Ubuntu2004.deb # Install tiger LB2 algorithm -# - We use a shallow git clone for reduced bandwidth usage +# - Clone tiger LB2 codebase (master branch, fixed to commit 720f8dfca4624792c8e57915c4222efec5a0c2d4) # - Subsequently we remove the .git directory to procuce a compacter docker layer -RUN git clone --depth 1 https://github.com/vuno/tiger_challenge.git /vuno \ - && rm -rf /vuno/.git +RUN git clone https://github.com/vuno/tiger_challenge.git /vuno && \ + cd /vuno && git reset --hard 720f8dfca4624792c8e57915c4222efec5a0c2d4 && \ + rm -rf /vuno/.git # Install tiger LB2 dependencies RUN pip3 install --no-cache-dir -r /vuno/requirements.txt @@ -36,14 +37,15 @@ RUN rm -rf /vuno/pretrained_weights && \ unzip /vuno/pretrained_weights.zip -d /vuno && \ rm /vuno/pretrained_weights.zip -# Clone MHub model (m-gc-tiger-lb2 branch, fixed to commit TODO) -#RUN git init \ +# Clone the main branch of MHubAI/models TODO +#RUN git stash \ # && git sparse-checkout set "models/gc_tiger_lb2" \ -# && git fetch https://github.com/MHubAI/models.git m-gc-tiger-lb2 \ -# && git merge TODO +# && git fetch https://github.com/MHubAI/models.git main \ +# && git merge FETCH_HEAD # Add model and algorithm code bases to python path ENV PYTHONPATH="/vuno:/app" -# Default run script (cannot use configuration yml file due to panimg converter not ...) 
-CMD ["python3", "/app/models/gc_tiger_lb2/scripts/run.py"] +# Set default entrypoint +ENTRYPOINT ["python3", "-m", "mhubio.run"] +CMD ["--config", "/app/models/gc_tiger_lb2/config/default.yml"] diff --git a/models/gc_tiger_lb2/scripts/run.py b/models/gc_tiger_lb2/scripts/run.py deleted file mode 100644 index fdb5933d..00000000 --- a/models/gc_tiger_lb2/scripts/run.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -------------------------------------------------------- -MHub / DIAG - Run HookNet Lung Segmentation Model - WSI Dicom input variant -------------------------------------------------------- - -------------------------------------------------------- -Author: Sil van de Leemput -Email: sil.vandeleemput@radboudumc.nl -------------------------------------------------------- -""" - -import sys -sys.path.append('.') - -from mhubio.core import Config, DataType, FileType -from mhubio.modules.organizer.DataOrganizer import DataOrganizer -from mhubio.modules.importer.DicomImporter import DicomImporter -from models.gc_tiger_lb2.utils.TigerLB2Runner import TigerLB2Runner -from models.gc_tiger_lb2.utils.PanImgConverters import TiffPanImgConverter - -# clean-up -import shutil -shutil.rmtree("/app/data/sorted_data", ignore_errors=True) -shutil.rmtree("/app/tmp", ignore_errors=True) -shutil.rmtree("/app/data/output_data", ignore_errors=True) - -# config -config = Config('/app/models/gc_tiger_lb2/config/default.yml') - -# import (sm:dicom) -DicomImporter(config).execute() - -# convert (sm:dicom -> sm:tiff) -TiffPanImgConverter(config).execute() - -# execute model (sm:tiff -> json) -TigerLB2Runner(config).execute() - -# organize data into output folder -organizer = DataOrganizer(config, set_file_permissions=sys.platform.startswith('linux')).execute() diff --git a/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py b/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py index 2771696f..e6bdb9ec 100644 --- a/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py +++ b/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py @@ -1,22 +1,24 @@ """ ------------------------------------------------- -Mhub / DIAG - CLI Run script for the TIGER LB2 ------------------------------------------------- +-------------------------------------------------------- +Mhub / DIAG - CLI Run script for the TIGER LB2 Algorithm +-------------------------------------------------------- ------------------------------------------------- +-------------------------------------------------------- Author: Sil van de Leemput Email: sil.vandeleemput@radboudumc.nl ------------------------------------------------- +-------------------------------------------------------- """ import argparse from pathlib import Path +import torch + +# The required pipeline methods are imported from the tiger_challenge repository +# The algorithm.rw module is imported for IO operations import pipeline.tils_pipeline as tils_pipeline import algorithm.rw as rw -import torch - def tiger_lb2_cli() -> None: parser = argparse.ArgumentParser("Tiger LB2 Run CLI") diff --git a/models/gc_tiger_lb2/utils/PanImgConverters.py b/models/gc_tiger_lb2/utils/PanImgConverters.py deleted file mode 100644 index 21b91f5d..00000000 --- a/models/gc_tiger_lb2/utils/PanImgConverters.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -------------------------------------------------------------- -MHub - PanImg Conversion Modules Dicom2Mha and WSI-Dicom2Tiff -------------------------------------------------------------- - -------------------------------------------------------------- -Author: Sil van de Leemput -Email: 
sil.vandeleemput@radboudumc.nl -------------------------------------------------------------- -""" - - -from typing import Optional - -from mhubio.modules.convert.DataConverter import DataConverter -from mhubio.core import Instance, InstanceData, DataType, FileType - -import os -from pathlib import Path -import shutil - -from panimg.exceptions import UnconsumedFilesException -from panimg.image_builders.dicom import image_builder_dicom -from panimg.image_builders.tiff import image_builder_tiff -from panimg.image_builders.metaio_nrrd import image_builder_nrrd - -import SimpleITK - - -class MhaPanImgConverter(DataConverter): - """ - Conversion module. - Convert instance data from dicom or nrrd to mha. - """ - - def convert(self, instance: Instance) -> Optional[InstanceData]: - - # create a converted instance - has_instance_dicom = instance.hasType(DataType(FileType.DICOM)) - has_instance_nrrd = instance.hasType(DataType(FileType.NRRD)) - - assert has_instance_dicom or has_instance_nrrd, f"CONVERT ERROR: required datatype (dicom or nrrd) not available in instance {str(instance)}." - - # select input data, dicom has priority over nrrd - input_data = instance.data.filter(DataType(FileType.DICOM) if has_instance_dicom else DataType(FileType.NRRD)).first() - - # out data - mha_data = InstanceData("image.mha", DataType(FileType.MHA, input_data.type.meta)) - mha_data.instance = instance - - # paths - inp_data_dir = Path(input_data.abspath) - out_mha_file = Path(mha_data.abspath) - - # sanity check - assert(inp_data_dir.is_dir()) - - # DICOM CT to MHA conversion (if the file doesn't exist yet) - if out_mha_file.is_file(): - print("CONVERT ERROR: File already exists: ", out_mha_file) - return None - else: - # run conversion using panimg - input_files = {f for f in inp_data_dir.glob(["*.nrrd", "*.dcm"][has_instance_dicom]) if f.is_file()} - img_builder = image_builder_dicom if has_instance_dicom else image_builder_nrrd - try: - for result in img_builder(files=input_files): - sitk_image = result.image # SimpleITK image - SimpleITK.WriteImage(sitk_image, str(out_mha_file)) - except UnconsumedFilesException as e: - # e.file_errors is keyed with a Path to a file that could not be consumed, - # with a list of all the errors found with loading it, - # the user can then choose what to do with that information - print("CONVERT ERROR: UnconsumedFilesException during PanImg conversion: ", e.file_errors) - return None - - return mha_data - - -class TiffPanImgConverter(DataConverter): - """ - Conversion module. - Convert instance data from WSI-dicom to tiff. - """ - - def convert(self, instance: Instance) -> Optional[InstanceData]: - - # create a converted instance - assert instance.hasType(DataType(FileType.DICOM)), f"CONVERT ERROR: required datatype (dicom) not available in instance {str(instance)}." 
- dicom_data = instance.data.filter(DataType(FileType.DICOM)).first() - - # out data - tiff_data = InstanceData("image.tiff", DataType(FileType.TIFF, dicom_data.type.meta)) - tiff_data.instance = instance - - # paths - inp_dicom_dir = Path(dicom_data.abspath) - out_tiff_file = Path(tiff_data.abspath) - - # sanity check - assert(inp_dicom_dir.is_dir()) - - # WSI-DICOM to TIFF conversion (if the file doesn't exist yet) - if out_tiff_file.is_file(): - print("CONVERT ERROR: File already exists: ", out_tiff_file) - return None - else: - # run conversion using panimg - dcm_input_files = {f for f in inp_dicom_dir.glob("*.dcm") if f.is_file()} - print(f"Running WSI DICOM -> TIFF conversion on {len(dcm_input_files)} dcm files") - try: - for result in image_builder_tiff(files=dcm_input_files): - tiff_image = result.file # Path to the tiff file - shutil.move(str(tiff_image), str(out_tiff_file)) - except UnconsumedFilesException as e: - # e.file_errors is keyed with a Path to a file that could not be consumed, - # with a list of all the errors found with loading it, - # the user can then choose what to do with that information - print("CONVERT ERROR: UnconsumedFilesException during PanImg conversion: ", e.file_errors) - return None - - return tiff_data diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index 10a95b04..d6edd834 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -8,7 +8,7 @@ Email: sil.vandeleemput@radboudumc.nl ------------------------------------------------ """ -from mhubio.core import Instance, InstanceData, IO, Module +from mhubio.core import Instance, InstanceData, IO, Module, ValueOutput, Meta from pathlib import Path import numpy as np @@ -17,6 +17,16 @@ import subprocess as sp import sys +import json + + +@ValueOutput.Name('til_score') +@ValueOutput.Meta(Meta(key="value")) +@ValueOutput.Label('TIL score') +@ValueOutput.Type(float) +@ValueOutput.Description('percentage of stromal area covered by tumour infiltrating lymphocytes. Values between 0 (percent) to 100 (percent).') +class TilScoreOutput(ValueOutput): + pass class TigerLB2Runner(Module): @@ -25,10 +35,12 @@ class TigerLB2Runner(Module): @IO.Instance() @IO.Input('in_data', 'tiff:mod=sm', the='input whole slide image Tiff') - @IO.Output('out_data', 'gc_tiger_lb2_tils_score.json', 'json:model=TigerLB2TilsScore', 'in_data', the='TIGER LB2 Tils score') - def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData) -> None: + @IO.Output('out_data', 'gc_tiger_lb2_til_score.json', 'json:model=TigerLB2TILScore', 'in_data', the='TIGER LB2 TIL score') + @IO.OutputData('til_score', TilScoreOutput, data='in_data', the='TIGER LB2 TIL score - percentage of stromal area covered by tumour infiltrating lymphocytes. Values between 0-100 (percent).') + def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData, til_score: TilScoreOutput) -> None: assert torch.cuda.is_available(), "Error: TigerLB2Runner requires CUDA to be available!" 
+ # Execute the Tiger LB2 Algorithm through a Python subprocess proc = sp.run( [ sys.executable, @@ -39,3 +51,9 @@ def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData ) assert proc.returncode == 0, f"Something went wrong when calling {self.CLI_SCRIPT_PATH}, got return code: {proc.returncode}" + out_data.confirm() + + # export output til score as data as well + with open(out_data.abspath, "r") as f: + til_score.value = json.load(f) + assert isinstanceof(til_score.value, float) diff --git a/models/gc_tiger_lb2/utils/__init__.py b/models/gc_tiger_lb2/utils/__init__.py index 03fd3f6f..bac1a078 100644 --- a/models/gc_tiger_lb2/utils/__init__.py +++ b/models/gc_tiger_lb2/utils/__init__.py @@ -1,2 +1 @@ -from .PanImgConverters import * from .TigerLB2Runner import * From 169a5fef17106221d73ff11ae8f29d20c92b7010 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 14 Sep 2023 11:41:13 +0200 Subject: [PATCH 09/24] fix issue with last assert runner, re-enable ReportExporter --- models/gc_tiger_lb2/config/default.yml | 1 + models/gc_tiger_lb2/utils/TigerLB2Runner.py | 5 ++--- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/models/gc_tiger_lb2/config/default.yml b/models/gc_tiger_lb2/config/default.yml index 89699456..d92964e9 100644 --- a/models/gc_tiger_lb2/config/default.yml +++ b/models/gc_tiger_lb2/config/default.yml @@ -7,6 +7,7 @@ execute: - DicomImporter - TiffConverter - TigerLB2Runner +- ReportExporter - DataOrganizer modules: diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index d6edd834..f7d7a364 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -23,7 +23,7 @@ @ValueOutput.Name('til_score') @ValueOutput.Meta(Meta(key="value")) @ValueOutput.Label('TIL score') -@ValueOutput.Type(float) +@ValueOutput.Type(int) @ValueOutput.Description('percentage of stromal area covered by tumour infiltrating lymphocytes. 
Values between 0 (percent) to 100 (percent).') class TilScoreOutput(ValueOutput): pass @@ -51,9 +51,8 @@ def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData ) assert proc.returncode == 0, f"Something went wrong when calling {self.CLI_SCRIPT_PATH}, got return code: {proc.returncode}" - out_data.confirm() # export output til score as data as well with open(out_data.abspath, "r") as f: til_score.value = json.load(f) - assert isinstanceof(til_score.value, float) + assert isinstance(til_score.value, int) From 982db19ec84ce92954b28fd0d30e5fbe9a1898a9 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Sat, 7 Oct 2023 11:52:41 +0200 Subject: [PATCH 10/24] replace subprocess call with self.subprocess, replace asserts with explicit errors --- models/gc_tiger_lb2/utils/TigerLB2Runner.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index f7d7a364..f5503ba3 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -8,14 +8,13 @@ Email: sil.vandeleemput@radboudumc.nl ------------------------------------------------ """ -from mhubio.core import Instance, InstanceData, IO, Module, ValueOutput, Meta +from mhubio.core import Instance, InstanceData, IO, Module, ValueOutput, Meta, DataType, FileType from pathlib import Path import numpy as np import SimpleITK as sitk import torch -import subprocess as sp import sys import json @@ -38,10 +37,11 @@ class TigerLB2Runner(Module): @IO.Output('out_data', 'gc_tiger_lb2_til_score.json', 'json:model=TigerLB2TILScore', 'in_data', the='TIGER LB2 TIL score') @IO.OutputData('til_score', TilScoreOutput, data='in_data', the='TIGER LB2 TIL score - percentage of stromal area covered by tumour infiltrating lymphocytes. Values between 0-100 (percent).') def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData, til_score: TilScoreOutput) -> None: - assert torch.cuda.is_available(), "Error: TigerLB2Runner requires CUDA to be available!" 
+ if not torch.cuda.is_available(): + raise NotImplementedError("TigerLB2Runner requires CUDA to be available!") # Execute the Tiger LB2 Algorithm through a Python subprocess - proc = sp.run( + self.subprocess( [ sys.executable, str(self.CLI_SCRIPT_PATH), @@ -50,7 +50,8 @@ def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData ] ) - assert proc.returncode == 0, f"Something went wrong when calling {self.CLI_SCRIPT_PATH}, got return code: {proc.returncode}" + if not Path(out_data.abspath).is_file(): + raise OSError(f"Something went wrong when calling {self.CLI_SCRIPT_PATH} as a subprocess, couldn't find output file: {out_data.abspath}") # export output til score as data as well with open(out_data.abspath, "r") as f: From 062cb02be2a9ddfeaffbbbda1cc661daefff6023 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 19 Oct 2023 16:03:49 +0200 Subject: [PATCH 11/24] add basic meta.json --- models/gc_tiger_lb2/meta.json | 113 ++++++++++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 models/gc_tiger_lb2/meta.json diff --git a/models/gc_tiger_lb2/meta.json b/models/gc_tiger_lb2/meta.json new file mode 100644 index 00000000..5650bafc --- /dev/null +++ b/models/gc_tiger_lb2/meta.json @@ -0,0 +1,113 @@ +{ + "id": "c5397909-0397-489f-8744-6bf3952e9a1c", + "name": "tiger_lb2", + "title": "TIGER challenge winner: Team VUNO", + "summary": { + "description": "Participants in the TIGER challenge will have to develop computer algorithms to analyze H&E-stained whole-slide images of breast cancer histopathology, to perform three tasks: detection of lymphocytes and plasma cells, which are the main types of cells considered as tumor-infiltrating lymphocytes; segmentation of invasive tumor and tumor-associated stroma, which are the main tissue compartments considered when identifying relevant regions for the TILs; compute an automated TILs score, one score per slide, based on the output of detection and segmentation.", + "inputs": [ + { + "label": "Whole-slide image", + "description": "H&E-stained whole-slide image of breast cancer histopathology", + "format": "DICOM", + "modality": "SM", + "bodypartexamined": "Breast", + "slicethickness": "", + "non-contrast": false, + "contrast": false + } + ], + "outputs": [ + { + "type": "Prediction", + "valueType": "Probability", + "label": "TIL score", + "description": "Percentage of stromal area covered by tumour infiltrating lymphocytes. 
Values between 0 (percent) to 100 (percent).", + "classes": [] + } + ], + "model": { + "architecture": "Combination of multiple U-Nets with EfficientNet B2/B0 encoders, and YOLOv5 networks for detection.", + "training": "supervised", + "cmpapproach": "2D" + }, + "data": { + "training": { + "vol_samples": 230 + }, + "evaluation": { + "vol_samples": 58 + }, + "public": false, + "external": false + } + }, + "details": { + "name": "LB2", + "version": "55c49c9e-4216-4142-b1c8-f5d85781add3", + "devteam": "VUNO", + "type": "Segmentation/Classification hybrid", + "date": { + "weights": "2023-07-06", + "code": "2023-07-06", + "pub": "2022-08-26" + }, + "cite": "", + "license": { + "code": "Apache 2.0", + "weights": "CC BY-NC 4.0" + }, + "publications": [], + "github": "https://github.com/vuno/tiger_challenge", + "zenodo": "https://doi.org/10.5281/zenodo.8112147", + "colab": "", + "slicer": false + }, + "info": { + "use": { + "title": "Intended use", + "text": "Prediction of the percentage of stomal area covered by tumour infiltrating lymphocytes on H&E-stained whole-slide image of breast cancer histopathology.", + "references": [], + "tables": [] + }, + "analyses": { + "title": "Evaluation", + "text": "The prognostic value of the automatic \"TIL score\" generated by the submitted algorithms were computed for the test set. This was done by building a multivariate Cox regression model trained with predefined clinical variables and the produced TILs score. The concordance index (Uno’s C-index) of this model was computed and the algorithms were ranked based on its value. ", + "references": [ + { + "label": "On the C-statistics for Evaluating Overall Adequacy of Risk Prediction Procedures with Censored Survival Data", + "uri": "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3079915/" + } + ], + "tables": [] + }, + "evaluation": { + "title": "Evaluation data", + "text": "The test set consists of a separate dataset of n=707 H&E-stained whole-slide breast cancer histopathology images", + "references": [], + "tables": [] + }, + "training": { + "title": "Training data", + "text": "For the TIGER challenge three public training datasets were made available: 1. WSIROIS: Whole-slide images with manual annotations in regions of interest 2. WSIBULK: Whole-slide images with coarse manual annotation of the tumor bulk 3. 
WSITILS: Whole-slide images with visual estimation of the TILs at slide level", + "references": [ + { + "label": "WSIROIS, WSIBULK, WSITILS on AWS Open Data", + "uri": "https://registry.opendata.aws/tiger/" + } + ], + "tables": [] + }, + "ethics": { + "title": "", + "text": "", + "references": [], + "tables": [] + }, + "limitations": { + "title": "Limitations", + "text": "This algorithm was developed for research purposes only.", + "references": [], + "tables": [] + } + } +} \ No newline at end of file From ca1a45044066d3c9153a888ab18e4f23892f9b40 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 24 Oct 2023 16:48:53 +0200 Subject: [PATCH 12/24] add minimum panimg pip install fix for wsi dicom conversion --- models/gc_tiger_lb2/dockerfiles/Dockerfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/models/gc_tiger_lb2/dockerfiles/Dockerfile b/models/gc_tiger_lb2/dockerfiles/Dockerfile index 05efeb8a..ef5f8fff 100644 --- a/models/gc_tiger_lb2/dockerfiles/Dockerfile +++ b/models/gc_tiger_lb2/dockerfiles/Dockerfile @@ -29,7 +29,10 @@ RUN git clone https://github.com/vuno/tiger_challenge.git /vuno && \ RUN pip3 install --no-cache-dir -r /vuno/requirements.txt # Reinstall correct version of Numpy to function with ASAP 2.1 -RUN pip install --no-cache-dir --force-reinstall numpy==1.22 +RUN pip3 install --no-cache-dir --force-reinstall numpy==1.22 + +# Enforce minimum version of panimg (with WSI fix) +RUN pip3 install --no-cache-dir panimg>=0.13.2 # Download and install model weights file from zenodo RUN rm -rf /vuno/pretrained_weights && \ From af0287ba26edf49cbb86097f3b2a7f0d128fd212 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 24 Oct 2023 16:55:41 +0200 Subject: [PATCH 13/24] changed algorithm details type --- models/gc_tiger_lb2/meta.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/gc_tiger_lb2/meta.json b/models/gc_tiger_lb2/meta.json index 5650bafc..615c0ff9 100644 --- a/models/gc_tiger_lb2/meta.json +++ b/models/gc_tiger_lb2/meta.json @@ -45,7 +45,7 @@ "name": "LB2", "version": "55c49c9e-4216-4142-b1c8-f5d85781add3", "devteam": "VUNO", - "type": "Segmentation/Classification hybrid", + "type": "Segmentation/Prediction hybrid", "date": { "weights": "2023-07-06", "code": "2023-07-06", From a27dad50fe07be309bcaa1e815681d1e38d931ef Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 23 Nov 2023 22:39:28 +0100 Subject: [PATCH 14/24] add import mhub model definition lines, remove panimg pip install, remove first line Dockerfile --- models/gc_tiger_lb2/dockerfiles/Dockerfile | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/models/gc_tiger_lb2/dockerfiles/Dockerfile b/models/gc_tiger_lb2/dockerfiles/Dockerfile index ef5f8fff..f2d867b5 100644 --- a/models/gc_tiger_lb2/dockerfiles/Dockerfile +++ b/models/gc_tiger_lb2/dockerfiles/Dockerfile @@ -1,4 +1,3 @@ -# Specify the base image for the environment FROM mhubai/base:latest # Specify/override authors label @@ -31,20 +30,15 @@ RUN pip3 install --no-cache-dir -r /vuno/requirements.txt # Reinstall correct version of Numpy to function with ASAP 2.1 RUN pip3 install --no-cache-dir --force-reinstall numpy==1.22 -# Enforce minimum version of panimg (with WSI fix) -RUN pip3 install --no-cache-dir panimg>=0.13.2 - # Download and install model weights file from zenodo RUN rm -rf /vuno/pretrained_weights && \ wget https://zenodo.org/record/8112176/files/pretrained_weights.zip -O /vuno/pretrained_weights.zip && \ unzip /vuno/pretrained_weights.zip -d /vuno 
&& \ rm /vuno/pretrained_weights.zip -# Clone the main branch of MHubAI/models TODO -#RUN git stash \ -# && git sparse-checkout set "models/gc_tiger_lb2" \ -# && git fetch https://github.com/MHubAI/models.git main \ -# && git merge FETCH_HEAD +# Import the MHub model definiton +ARG MHUB_MODELS_REPO +RUN buildutils/import_mhub_model.sh gc_tiger_lb2 ${MHUB_MODELS_REPO} # Add model and algorithm code bases to python path ENV PYTHONPATH="/vuno:/app" From 1965629b5c1b62b8ce6b1fc5a1b7d44f5e5de168 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 23 Nov 2023 22:45:59 +0100 Subject: [PATCH 15/24] modified meta line --- models/gc_tiger_lb2/utils/TigerLB2Runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index f5503ba3..71abef28 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -20,7 +20,7 @@ @ValueOutput.Name('til_score') -@ValueOutput.Meta(Meta(key="value")) +@ValueOutput.Meta(Meta(scale="percent", min=0, max=100)) @ValueOutput.Label('TIL score') @ValueOutput.Type(int) @ValueOutput.Description('percentage of stromal area covered by tumour infiltrating lymphocytes. Values between 0 (percent) to 100 (percent).') From a82e538d721a299bd447d3963b2209d6aaa3d155 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Mon, 22 Jan 2024 11:34:42 +0100 Subject: [PATCH 16/24] meta.json - fix typo and match model name --- models/gc_tiger_lb2/meta.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/gc_tiger_lb2/meta.json b/models/gc_tiger_lb2/meta.json index 615c0ff9..cee33db9 100644 --- a/models/gc_tiger_lb2/meta.json +++ b/models/gc_tiger_lb2/meta.json @@ -1,6 +1,6 @@ { "id": "c5397909-0397-489f-8744-6bf3952e9a1c", - "name": "tiger_lb2", + "name": "gc_tiger_lb2", "title": "TIGER challenge winner: Team VUNO", "summary": { "description": "Participants in the TIGER challenge will have to develop computer algorithms to analyze H&E-stained whole-slide images of breast cancer histopathology, to perform three tasks: detection of lymphocytes and plasma cells, which are the main types of cells considered as tumor-infiltrating lymphocytes; segmentation of invasive tumor and tumor-associated stroma, which are the main tissue compartments considered when identifying relevant regions for the TILs; compute an automated TILs score, one score per slide, based on the output of detection and segmentation.", @@ -65,7 +65,7 @@ "info": { "use": { "title": "Intended use", - "text": "Prediction of the percentage of stomal area covered by tumour infiltrating lymphocytes on H&E-stained whole-slide image of breast cancer histopathology.", + "text": "Prediction of the percentage of stromal area covered by tumour infiltrating lymphocytes on H&E-stained whole-slide image of breast cancer histopathology.", "references": [], "tables": [] }, From 59c97c7d35039d3571857855f420463e85f2b2ff Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 5 Mar 2024 15:56:38 +0100 Subject: [PATCH 17/24] fix dynamic modality in default.yml, improve error message in runner #38 --- models/gc_tiger_lb2/config/default.yml | 2 +- models/gc_tiger_lb2/utils/TigerLB2Runner.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/models/gc_tiger_lb2/config/default.yml b/models/gc_tiger_lb2/config/default.yml index d92964e9..354cdf32 100644 --- a/models/gc_tiger_lb2/config/default.yml +++ b/models/gc_tiger_lb2/config/default.yml @@ -16,7 
+16,7 @@ modules: import_dir: sorted_data sort_data: True meta: - mod: sm + mod: "%Modality" ReportExporter: includes: diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index 71abef28..0f1d2137 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -50,8 +50,12 @@ def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData ] ) + # Validate that the required output file was generated by the subprocess if not Path(out_data.abspath).is_file(): - raise OSError(f"Something went wrong when calling {self.CLI_SCRIPT_PATH} as a subprocess, couldn't find output file: {out_data.abspath}") + raise FileNotFoundError( + f"Couldn't find expected output file: `{out_data.abspath}`. " + f"The subprocess `{self.CLI_SCRIPT_PATH}` did not generate the required output file." + ) # export output til score as data as well with open(out_data.abspath, "r") as f: From 53e9e5270a3952ca8893ede37768fbbcf857a736 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Tue, 5 Mar 2024 15:59:48 +0100 Subject: [PATCH 18/24] meta.json - change version to 0.1.0 --- models/gc_tiger_lb2/meta.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/gc_tiger_lb2/meta.json b/models/gc_tiger_lb2/meta.json index cee33db9..214fc70f 100644 --- a/models/gc_tiger_lb2/meta.json +++ b/models/gc_tiger_lb2/meta.json @@ -43,7 +43,7 @@ }, "details": { "name": "LB2", - "version": "55c49c9e-4216-4142-b1c8-f5d85781add3", + "version": "0.1.0", "devteam": "VUNO", "type": "Segmentation/Prediction hybrid", "date": { From 8ea1ba9cd0491421901c6859a9c1d3fd9070e8a5 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 7 Mar 2024 12:35:10 +0100 Subject: [PATCH 19/24] meta.json - modified, updated, and extended --- models/gc_tiger_lb2/meta.json | 54 ++++++++++++++++++++++++++--------- 1 file changed, 40 insertions(+), 14 deletions(-) diff --git a/models/gc_tiger_lb2/meta.json b/models/gc_tiger_lb2/meta.json index 214fc70f..7b78fbd2 100644 --- a/models/gc_tiger_lb2/meta.json +++ b/models/gc_tiger_lb2/meta.json @@ -3,7 +3,7 @@ "name": "gc_tiger_lb2", "title": "TIGER challenge winner: Team VUNO", "summary": { - "description": "Participants in the TIGER challenge will have to develop computer algorithms to analyze H&E-stained whole-slide images of breast cancer histopathology, to perform three tasks: detection of lymphocytes and plasma cells, which are the main types of cells considered as tumor-infiltrating lymphocytes; segmentation of invasive tumor and tumor-associated stroma, which are the main tissue compartments considered when identifying relevant regions for the TILs; compute an automated TILs score, one score per slide, based on the output of detection and segmentation.", + "description": "This algorithm predicts the percentage of stromal area covered by tumour infiltrating lymphocytes (TIL) on H&E-stained whole-slide images of breast cancer histopathology. This algorithm first segments invasive tumor and tumor-associated stroma and subsequently detects lymphocytes and plasma cells, which are finally combined to estimate the relevant regions for TIL estimation. 
This model algorithm was the challenge winner of the TIGER challenge hosted on Grand Challenge.", "inputs": [ { "label": "Whole-slide image", @@ -32,10 +32,10 @@ }, "data": { "training": { - "vol_samples": 230 + "vol_samples": 277 }, "evaluation": { - "vol_samples": 58 + "vol_samples": 707 }, "public": false, "external": false @@ -56,7 +56,12 @@ "code": "Apache 2.0", "weights": "CC BY-NC 4.0" }, - "publications": [], + "publications": [ + { + "uri": "https://github.com/vuno/tiger_challenge/blob/720f8dfca4624792c8e57915c4222efec5a0c2d4/figure/method_description.pdf", + "title": "TIGER Challenge - Description of the Developed Algorithm - Team VUNO" + } + ], "github": "https://github.com/vuno/tiger_challenge", "zenodo": "https://doi.org/10.5281/zenodo.8112147", "colab": "", @@ -65,33 +70,54 @@ "info": { "use": { "title": "Intended use", - "text": "Prediction of the percentage of stromal area covered by tumour infiltrating lymphocytes on H&E-stained whole-slide image of breast cancer histopathology.", - "references": [], + "text": "This algorithm is a classification method using a variety of deep-learning techniques to predict the percentage of stromal area covered by tumour infiltrating lymphocytes on H&E-stained whole-slide image of breast cancer histopathology. The algorithm can also be found on Grand Challenge [1] (access upon request), where it participated and won the TIGER challenge.", + "references": [ + { + "label": "TIGER challenge winner algorithm on Grand Challenge (access upon request)", + "uri": "https://grand-challenge.org/algorithms/tiger-algorithm-lb2" + } + ], "tables": [] }, "analyses": { "title": "Evaluation", - "text": "The prognostic value of the automatic \"TIL score\" generated by the submitted algorithms were computed for the test set. This was done by building a multivariate Cox regression model trained with predefined clinical variables and the produced TILs score. The concordance index (Uno’s C-index) of this model was computed and the algorithms were ranked based on its value. ", + "text": "Patient-level diagnosis performance is evaluated using the \"TIL score\". The diagnostic performance of the algorithms were estimated using a multivariate Cox regression model trained with predefined clinical variables and the produced TIL scores. 
Based on this regression model the concordance index (Uno’s C-index) was computed to rank this algorithm.", "references": [ { - "label": "On the C-statistics for Evaluating Overall Adequacy of Risk Prediction Procedures with Censored Survival Data", - "uri": "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3079915/" + "label": "TIGER challenge details", + "uri": "https://tiger.grand-challenge.org" + }, + { + "label": "TIGER team VUNO algorithm final evaluation results on Grand Challenge.", + "uri": "https://tiger.grand-challenge.org/evaluation/survival-final-evaluation/leaderboard/" } ], - "tables": [] + "tables": [ + { + "label": "Evaluation results on the TIGER testing cohort of 707 WSI cases.", + "entries": { + "Uno's Concordance-index": "0.6388" + } + } + ] }, "evaluation": { "title": "Evaluation data", - "text": "The test set consists of a separate dataset of n=707 H&E-stained whole-slide breast cancer histopathology images", - "references": [], + "text": "The test set consists of a separate hidden dataset of n=707 H&E-stained whole-slide breast cancer histopathology images.", + "references": [ + { + "label": "TIGER data section", + "uri": "https://tiger.grand-challenge.org/Data/" + } + ], "tables": [] }, "training": { "title": "Training data", - "text": "For the TIGER challenge three public training datasets were made available: 1. WSIROIS: Whole-slide images with manual annotations in regions of interest 2. WSIBULK: Whole-slide images with coarse manual annotation of the tumor bulk 3. WSITILS: Whole-slide images with visual estimation of the TILs at slide level", + "text": "This algorithm was trained using two public training datasets: 1. WSIROIS: Whole-slide images with manual annotations in regions of interest. 2. WSITILS: Whole-slide images with visual estimation of the TILs at slide level.", "references": [ { - "label": "WSIROIS, WSIBULK, WSITILS on AWS Open Data", + "label": "WSIROIS, WSITILS on AWS Open Data", "uri": "https://registry.opendata.aws/tiger/" } ], From b973f7d850dd7799acc5f1262d7c609ccfd6c1d9 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 7 Mar 2024 12:35:34 +0100 Subject: [PATCH 20/24] Dockerfile - add reproducibility fix --- models/gc_tiger_lb2/dockerfiles/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/models/gc_tiger_lb2/dockerfiles/Dockerfile b/models/gc_tiger_lb2/dockerfiles/Dockerfile index f2d867b5..edb201bd 100644 --- a/models/gc_tiger_lb2/dockerfiles/Dockerfile +++ b/models/gc_tiger_lb2/dockerfiles/Dockerfile @@ -20,9 +20,11 @@ RUN apt-get update \ # Install tiger LB2 algorithm # - Clone tiger LB2 codebase (master branch, fixed to commit 720f8dfca4624792c8e57915c4222efec5a0c2d4) # - Subsequently we remove the .git directory to procuce a compacter docker layer +# - We also fix a reproducibility issue with the segmentation prediction by fixing the torch random seed RUN git clone https://github.com/vuno/tiger_challenge.git /vuno && \ cd /vuno && git reset --hard 720f8dfca4624792c8e57915c4222efec5a0c2d4 && \ - rm -rf /vuno/.git + rm -rf /vuno/.git && \ + sed -i "238i \ torch.manual_seed(0)" /vuno/segmentation/inference.py # Install tiger LB2 dependencies RUN pip3 install --no-cache-dir -r /vuno/requirements.txt From fa73a62fd76cf1613e48064d2f4b4e79c35a4cbf Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 7 Mar 2024 16:20:58 +0100 Subject: [PATCH 21/24] Dockerfile, runner, cli - added pipenv for model algorithm #38 --- models/gc_tiger_lb2/dockerfiles/Dockerfile | 44 +++++++++++-------- 
models/gc_tiger_lb2/utils/TigerLB2Runner.py | 16 +++---- .../tiger_lb2_cli.py => utils/cli.py} | 16 ++++--- 3 files changed, 41 insertions(+), 35 deletions(-) rename models/gc_tiger_lb2/{scripts/tiger_lb2_cli.py => utils/cli.py} (72%) diff --git a/models/gc_tiger_lb2/dockerfiles/Dockerfile b/models/gc_tiger_lb2/dockerfiles/Dockerfile index edb201bd..e168c625 100644 --- a/models/gc_tiger_lb2/dockerfiles/Dockerfile +++ b/models/gc_tiger_lb2/dockerfiles/Dockerfile @@ -3,19 +3,21 @@ FROM mhubai/base:latest # Specify/override authors label LABEL authors="sil.vandeleemput@radboudumc.nl" -# install required dependencies for algorithm -RUN pip3 install --no-cache-dir torch==2.0.1+cu118 torchvision==0.15.2+cu118 -f https://download.pytorch.org/whl/torch_stable.html - -# Install ASAP -RUN apt-get update \ - && apt-get -y install curl libpython3.8-dev \ - && curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" \ - && dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true \ - && apt-get -f install --fix-missing --fix-broken --assume-yes \ - && ldconfig -v \ - && apt-get clean \ - && echo "/opt/ASAP/bin" > /usr/local/lib/python3.8/dist-packages/asap.pth \ - && rm ASAP-2.1-py38-Ubuntu2004.deb +# Install pipenv (for a custom Python/Pip environment for ASAP-2.1 and the other algorithm requirements) +RUN pip3 install --no-cache-dir pipenv + +# Set environment variables for pipenv (installs into /app/.venv) +ENV PIPENV_VENV_IN_PROJECT=1 + +# Install ASAP 2.1 +RUN apt-get update && \ + apt-get -y install curl libpython3.8-dev && \ + curl --remote-name --location "https://github.com/computationalpathologygroup/ASAP/releases/download/ASAP-2.1/ASAP-2.1-py38-Ubuntu2004.deb" && \ + dpkg --install ASAP-2.1-py38-Ubuntu2004.deb || true && \ + apt-get -f install --fix-missing --fix-broken --assume-yes && \ + ldconfig -v && \ + apt-get clean && \ + rm ASAP-2.1-py38-Ubuntu2004.deb # Install tiger LB2 algorithm # - Clone tiger LB2 codebase (master branch, fixed to commit 720f8dfca4624792c8e57915c4222efec5a0c2d4) @@ -26,11 +28,17 @@ RUN git clone https://github.com/vuno/tiger_challenge.git /vuno && \ rm -rf /vuno/.git && \ sed -i "238i \ torch.manual_seed(0)" /vuno/segmentation/inference.py -# Install tiger LB2 dependencies -RUN pip3 install --no-cache-dir -r /vuno/requirements.txt - -# Reinstall correct version of Numpy to function with ASAP 2.1 -RUN pip3 install --no-cache-dir --force-reinstall numpy==1.22 +# Setup and install algorithm pipenv environment +# 1. Ensure we configure a new empty pipenv for Python 3.8 +# 2. Link ASAP libraries into our environment +# 3. Install required torch and torchvision dependencies +# 4. Install tiger LB2 dependencies from requirements.txt +# 5. 
Upgrade version of numpy and numba to function correctly with ASAP
+RUN pipenv install --python 3.8 && \
+    echo "/opt/ASAP/bin" > /app/.venv/lib/python3.8/site-packages/asap.pth && \
+    pipenv run pip install --no-cache-dir torch==2.0.1+cu118 torchvision==0.15.2+cu118 -f https://download.pytorch.org/whl/torch_stable.html && \
+    pipenv run pip install --no-cache-dir -r /vuno/requirements.txt && \
+    pipenv run pip install --no-cache-dir --upgrade numpy==1.24.4 numba==0.58.1
 
 # Download and install model weights file from zenodo
 RUN rm -rf /vuno/pretrained_weights && \
diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py
index 0f1d2137..7445eecd 100644
--- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py
+++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py
@@ -11,11 +11,6 @@
 from mhubio.core import Instance, InstanceData, IO, Module, ValueOutput, Meta, DataType, FileType
 
 from pathlib import Path
-import numpy as np
-import SimpleITK as sitk
-import torch
-
-import sys
 
 import json
 
@@ -30,20 +25,19 @@ class TilScoreOutput(ValueOutput):
 
 class TigerLB2Runner(Module):
 
-    CLI_SCRIPT_PATH = Path(__file__).parent.parent / "scripts" / "tiger_lb2_cli.py"
+    CLI_SCRIPT_PATH = Path(__file__).parent / "cli.py"
 
     @IO.Instance()
     @IO.Input('in_data', 'tiff:mod=sm', the='input whole slide image Tiff')
     @IO.Output('out_data', 'gc_tiger_lb2_til_score.json', 'json:model=TigerLB2TILScore', 'in_data', the='TIGER LB2 TIL score')
     @IO.OutputData('til_score', TilScoreOutput, data='in_data', the='TIGER LB2 TIL score - percentage of stromal area covered by tumour infiltrating lymphocytes. Values between 0-100 (percent).')
     def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData, til_score: TilScoreOutput) -> None:
-        if not torch.cuda.is_available():
-            raise NotImplementedError("TigerLB2Runner requires CUDA to be available!")
-
-        # Execute the Tiger LB2 Algorithm through a Python subprocess
+        # Execute the Tiger LB2 Algorithm through a Python subprocess and associated pipenv environment
         self.subprocess(
             [
-                sys.executable,
+                "pipenv",
+                "run",
+                "python",
                 str(self.CLI_SCRIPT_PATH),
                 in_data.abspath,
                 out_data.abspath,
diff --git a/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py b/models/gc_tiger_lb2/utils/cli.py
similarity index 72%
rename from models/gc_tiger_lb2/scripts/tiger_lb2_cli.py
rename to models/gc_tiger_lb2/utils/cli.py
index e6bdb9ec..9b5827de 100644
--- a/models/gc_tiger_lb2/scripts/tiger_lb2_cli.py
+++ b/models/gc_tiger_lb2/utils/cli.py
@@ -1,12 +1,16 @@
 """
---------------------------------------------------------
-Mhub / DIAG - CLI Run script for the TIGER LB2 Algorithm
---------------------------------------------------------
-
---------------------------------------------------------
+-----------------------------------------------------------
+GC / MHub - CLI Run script for the TIGER LB2 Algorithm
+  The model algorithm was wrapped in a CLI to ensure
+  the mhub framework is able to properly capture the algorithm's
+  stdout/stderr outputs. Furthermore, it simplifies running
+  the algorithm in its own environment using pipenv.
+----------------------------------------------------------- + +----------------------------------------------------------- Author: Sil van de Leemput Email: sil.vandeleemput@radboudumc.nl --------------------------------------------------------- +----------------------------------------------------------- """ import argparse From 653799a53773c9326389c889020f4fc5f27ad015 Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 18 Apr 2024 10:09:15 +0200 Subject: [PATCH 22/24] pull source from forked repo, added segmentation output as mha --- models/gc_tiger_lb2/config/default.yml | 1 + models/gc_tiger_lb2/dockerfiles/Dockerfile | 9 +++------ models/gc_tiger_lb2/utils/TigerLB2Runner.py | 19 +++++++++++-------- models/gc_tiger_lb2/utils/cli.py | 17 ++++++++++++----- 4 files changed, 27 insertions(+), 19 deletions(-) diff --git a/models/gc_tiger_lb2/config/default.yml b/models/gc_tiger_lb2/config/default.yml index 354cdf32..3f77e41a 100644 --- a/models/gc_tiger_lb2/config/default.yml +++ b/models/gc_tiger_lb2/config/default.yml @@ -29,3 +29,4 @@ modules: require_data_confirmation: true targets: - json-->[i:sid]/gc_tiger_lb2_til_score.json + - mha:mod=seg-->[i:sid]/gc_tiger_lb2_segmentation.mha diff --git a/models/gc_tiger_lb2/dockerfiles/Dockerfile b/models/gc_tiger_lb2/dockerfiles/Dockerfile index e168c625..c3896fb0 100644 --- a/models/gc_tiger_lb2/dockerfiles/Dockerfile +++ b/models/gc_tiger_lb2/dockerfiles/Dockerfile @@ -20,13 +20,10 @@ RUN apt-get update && \ rm ASAP-2.1-py38-Ubuntu2004.deb # Install tiger LB2 algorithm -# - Clone tiger LB2 codebase (master branch, fixed to commit 720f8dfca4624792c8e57915c4222efec5a0c2d4) +# - Clone tiger LB2 codebase (master branch, fixed to version 0.1.0) # - Subsequently we remove the .git directory to procuce a compacter docker layer -# - We also fix a reproducibility issue with the segmentation prediction by fixing the torch random seed -RUN git clone https://github.com/vuno/tiger_challenge.git /vuno && \ - cd /vuno && git reset --hard 720f8dfca4624792c8e57915c4222efec5a0c2d4 && \ - rm -rf /vuno/.git && \ - sed -i "238i \ torch.manual_seed(0)" /vuno/segmentation/inference.py +RUN git clone --depth 1 --branch 0.1.0 https://github.com/DIAGNijmegen/tiger_vuno_algorithm /vuno && \ + rm -rf /vuno/.git # Setup and install algorithm pipenv environment # 1. Ensure we configure a new empty pipenv for Python 3.8 diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index 7445eecd..6ccd6eec 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -29,9 +29,10 @@ class TigerLB2Runner(Module): @IO.Instance() @IO.Input('in_data', 'tiff:mod=sm', the='input whole slide image Tiff') - @IO.Output('out_data', 'gc_tiger_lb2_til_score.json', 'json:model=TigerLB2TILScore', 'in_data', the='TIGER LB2 TIL score') + @IO.Output('out_data', 'gc_tiger_lb2_til_score.json', 'json:model=TigerLB2TIL', 'in_data', the='TIGER LB2 TIL score') + @IO.Output('out_segmentation', 'gc_tiger_lb2_segmentation.tif', 'mha:mod=seg:model=TigerLB2TIL', 'in_data', the='Raw initial TIGER LB2 segmentation map for the input WSI image. The labels are: 1-Tumor, 2-Stroma, 3-Rest.') @IO.OutputData('til_score', TilScoreOutput, data='in_data', the='TIGER LB2 TIL score - percentage of stromal area covered by tumour infiltrating lymphocytes. 
Values between 0-100 (percent).') - def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData, til_score: TilScoreOutput) -> None: + def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData, out_segmentation: InstanceData, til_score: TilScoreOutput) -> None: # Execute the Tiger LB2 Algorithm through a Python subprocess and associated pipenv environment self.subprocess( [ @@ -41,15 +42,17 @@ def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData str(self.CLI_SCRIPT_PATH), in_data.abspath, out_data.abspath, + out_segmentation.abspath, ] ) - # Validate that the required output file was generated by the subprocess - if not Path(out_data.abspath).is_file(): - raise FileNotFoundError( - f"Couldn't find expected output file: `{out_data.abspath}`. " - f"The subprocess `{self.CLI_SCRIPT_PATH}` did not generate the required output file." - ) + # Validate that the required output file(s) were generated by the subprocess + for expected_file_path in (out_data.abspath, out_segmentation.abspath): + if not Path(expected_file_path).is_file(): + raise FileNotFoundError( + f"Couldn't find expected output file: `{expected_file_path}`. " + f"The subprocess `{self.CLI_SCRIPT_PATH}` did not generate the required output file." + ) # export output til score as data as well with open(out_data.abspath, "r") as f: diff --git a/models/gc_tiger_lb2/utils/cli.py b/models/gc_tiger_lb2/utils/cli.py index 9b5827de..ca973e1d 100644 --- a/models/gc_tiger_lb2/utils/cli.py +++ b/models/gc_tiger_lb2/utils/cli.py @@ -12,11 +12,11 @@ Email: sil.vandeleemput@radboudumc.nl ----------------------------------------------------------- """ - import argparse from pathlib import Path import torch +import SimpleITK # The required pipeline methods are imported from the tiger_challenge repository # The algorithm.rw module is imported for IO operations @@ -28,24 +28,31 @@ def tiger_lb2_cli() -> None: parser = argparse.ArgumentParser("Tiger LB2 Run CLI") parser.add_argument("input_file", type=str, help="Input WSI TIFF file path") parser.add_argument("output_file", type=str, help="Output JSON file path") + parser.add_argument("output_segmentation_file", type=str, help="Output segmentation MHA file path") args = parser.parse_args() run_tiger_lb2( wsi_filepath=Path(args.input_file), - output_json_file=Path(args.output_file) + output_json_file=Path(args.output_file), + output_segmentation_file=Path(args.output_segmentation_file), ) -def run_tiger_lb2(wsi_filepath: Path, output_json_file: Path) -> None: +def run_tiger_lb2(wsi_filepath: Path, output_json_file: Path, output_segmentation_file: Path) -> None: if not torch.cuda.is_available(): raise RuntimeError("run_tiger_lb2 requires CUDA to be available!") print(f"Input WSI: {wsi_filepath}") wsi_mri = rw.open_multiresolutionimage_image(wsi_filepath) - tils_score_writer = rw.TilsScoreWriter(output_json_file) - tils_score = tils_pipeline.run_tils_pipeline(wsi_mri) + print("Run pipeline") + tils_score, seg_mask_np = tils_pipeline.run_tils_pipeline(wsi_mri) + + print(f"Writing segmentation map to {output_segmentation_file}") + seg_mask_sitk = SimpleITK.GetImageFromArray(seg_mask_np) + SimpleITK.WriteImage(seg_mask_sitk, str(output_segmentation_file), True) print(f"Writing tils score to {output_json_file}") + tils_score_writer = rw.TilsScoreWriter(output_json_file) tils_score_writer.set_tils_score(tils_score=tils_score) tils_score_writer.save() From 0571aada2d5b946dc1d6a931f389d1d873694e2a Mon Sep 17 00:00:00 2001 From: 
silvandeleemput Date: Wed, 24 Apr 2024 10:24:35 +0200 Subject: [PATCH 23/24] meta.json - add tumor and stroma segmentation mask description to outputs --- models/gc_tiger_lb2/meta.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/models/gc_tiger_lb2/meta.json b/models/gc_tiger_lb2/meta.json index 7b78fbd2..123a7609 100644 --- a/models/gc_tiger_lb2/meta.json +++ b/models/gc_tiger_lb2/meta.json @@ -23,6 +23,13 @@ "label": "TIL score", "description": "Percentage of stromal area covered by tumour infiltrating lymphocytes. Values between 0 (percent) to 100 (percent).", "classes": [] + }, + { + "type": "Prediction", + "valueType": "Segmentation mask", + "label": "WSI Tumor and stroma segmentation mask", + "description": "WSI tumor and stroma segmentation mask (MHA) used as first step in the pipeline. This segmentation mask was not validated, use it at your own risk! The labels are: 1-tumor, 2-stroma, 3-rest.", + "classes": [] } ], "model": { From 4bc6631dd0cb473b8904a52eb535509dad1b526e Mon Sep 17 00:00:00 2001 From: silvandeleemput Date: Thu, 25 Apr 2024 12:00:33 +0200 Subject: [PATCH 24/24] runner.py - fix output type filename --- models/gc_tiger_lb2/utils/TigerLB2Runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/gc_tiger_lb2/utils/TigerLB2Runner.py b/models/gc_tiger_lb2/utils/TigerLB2Runner.py index 6ccd6eec..de72d79a 100644 --- a/models/gc_tiger_lb2/utils/TigerLB2Runner.py +++ b/models/gc_tiger_lb2/utils/TigerLB2Runner.py @@ -30,7 +30,7 @@ class TigerLB2Runner(Module): @IO.Instance() @IO.Input('in_data', 'tiff:mod=sm', the='input whole slide image Tiff') @IO.Output('out_data', 'gc_tiger_lb2_til_score.json', 'json:model=TigerLB2TIL', 'in_data', the='TIGER LB2 TIL score') - @IO.Output('out_segmentation', 'gc_tiger_lb2_segmentation.tif', 'mha:mod=seg:model=TigerLB2TIL', 'in_data', the='Raw initial TIGER LB2 segmentation map for the input WSI image. The labels are: 1-Tumor, 2-Stroma, 3-Rest.') + @IO.Output('out_segmentation', 'gc_tiger_lb2_segmentation.mha', 'mha:mod=seg:model=TigerLB2TIL', 'in_data', the='Raw initial TIGER LB2 segmentation map for the input WSI image. The labels are: 1-Tumor, 2-Stroma, 3-Rest.') @IO.OutputData('til_score', TilScoreOutput, data='in_data', the='TIGER LB2 TIL score - percentage of stromal area covered by tumour infiltrating lymphocytes. Values between 0-100 (percent).') def task(self, instance: Instance, in_data: InstanceData, out_data: InstanceData, out_segmentation: InstanceData, til_score: TilScoreOutput) -> None: # Execute the Tiger LB2 Algorithm through a Python subprocess and associated pipenv environment
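
For reference, a minimal usage sketch of how the resulting image could be built and run once this patch series is applied, based on the Dockerfile ENTRYPOINT/CMD and the config files defined above. The image tag and host paths are illustrative placeholders, the build command assumes a local checkout of the MHubAI/models repository, and the optional MHUB_MODELS_REPO build argument is only needed when importing the model definition from a fork; input and output mounts follow the /app/data/input_data and /app/data/output_data locations used in the configs.

# Build the image (tag is illustrative)
docker build -t mhubai/gc_tiger_lb2:latest -f models/gc_tiger_lb2/dockerfiles/Dockerfile .

# Run the default DICOM (sm) pipeline; a CUDA-capable GPU is required by the CLI script
docker run --rm --gpus all \
    -v /path/to/input_dicom:/app/data/input_data \
    -v /path/to/output:/app/data/output_data \
    mhubai/gc_tiger_lb2:latest

# Run the TIFF input pipeline instead by overriding the default CMD
docker run --rm --gpus all \
    -v /path/to/input_tiff:/app/data/input_data \
    -v /path/to/output:/app/data/output_data \
    mhubai/gc_tiger_lb2:latest --config /app/models/gc_tiger_lb2/config/tiff_pipeline.yml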