@Proceedings{COMPAY2024,
name = {Proceedings of the MICCAI Workshop on Computational Pathology},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
shortname = {MICCAI COMPAYL 2024},
year = {2024},
start = {2024-10-06},
end = {2024-10-06},
published = {2024-11-17},
editor = {Francesco Ciompi and Nadieh Khalili and Linda Studer and Milda Poceviciute and Amjad Khan and Mitko Veta and Yiping Jiao and Neda Haj-Hosseini and Hao Chen and Shan Raza and Fayyaz Minhas and Inti Zlobec and Nikolay Burlutskiy and Veronica Vilaplana and Biagio Brattoli and Henning M{\"u}ller and Manfredo Atzori},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
address = {Marrakesh, Morocco},
volume = {254}
}
@InProceedings{keshvarikhojasteh24,
title = {Multi-head Attention-based Deep Multiple Instance Learning},
author = {Keshvarikhojasteh, Hassan and Pluim, Josien P. W. and Veta, Mitko},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {1--12},
abstract = {This paper introduces MAD-MIL, a Multi-head Attention-based Deep Multiple Instance Learning model, designed for weakly supervised Whole Slide Images (WSIs) classification in digital pathology. Inspired by the multi-head attention mechanism of the Transformer, MAD-MIL simplifies model complexity while achieving competitive results against advanced models like CLAM and DS-MIL. Evaluated on MNIST-BAGS and public datasets, including TUPAC16, TCGA BRCA, TCGA LUNG, and TCGA KIDNEY, MAD-MIL consistently outperforms ABMIL. This demonstrates enhanced information diversity, interpretability, and efficiency in slide representation. The model’s effectiveness, coupled with fewer trainable parameters and lower computational complexity, makes it a promising solution for automated pathology workflows. Our code is available at https://github.com/tueimage/MAD-MIL.}
}
@InProceedings{pochet24,
title = {Lymphocytes subtyping on H\&E slides with automatic labelling through same-tissue stained ImmunoFluorescence images},
author = {Pochet, Etienne and Ayestas, Luis Cano and Casse, Alhassan and Tang, Qi and Trullo, Roger},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {13--24},
abstract = {Accurate identification and classification of immune cells within tissue samples are critical for understanding disease mechanisms and predicting treatment responses as a cornerstone for personalized medicine. Traditional histopathology relies on hematoxylin and eosin (H\&E) staining, which provides structural context but lacks specificity for immune cell sub-types, preventing pathologists from more precise identification. In contrast, immunofluorescent (IF) staining enables precise targeting of specific markers, but this recently developed technology is very costly and not widely applied in clinical practice yet. In this work, we propose a method to leverage registered pairs of H\&E and IF stained images from the same tissue to automatically generate cell type labels for H\&E from IF marker expression, allowing for precise identification. In particular, we demonstrate the feasibility of lymphocyte sub-typing from H\&E images by training cell-level classifiers to accurately distinguish T-cell subtypes (CD45 / CD3e / CD4 / CD8a). Full code will be made available.}
}
@InProceedings{liu24,
title = {WSI-SAM: Multi-resolution Segment Anything Model (SAM) for histopathology whole-slide images},
author = {Liu, Hong and Yang, Haosen and Diest, Paul J. van and Pluim, Josien P.W. and Veta, Mitko},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {25--37},
abstract = {The Segment Anything Model (SAM) marks a significant advancement in segmentation models, offering robust zero-shot abilities and dynamic prompting. However, existing medical SAMs are not suitable for the multi-scale nature of whole-slide images (WSIs), restricting their effectiveness. To resolve this drawback, we present WSI-SAM, enhancing SAM with precise object segmentation capabilities for histopathology images using multi-resolution patches, while preserving its efficient, prompt-driven design, and zero-shot abilities. To fully exploit pretrained knowledge while minimizing training overhead, we keep SAM frozen, introducing only minimal extra parameters and computational overhead. In particular, we introduce a High-Resolution (HR) token, a Low-Resolution (LR) token, and a dual mask decoder. This decoder integrates the original SAM mask decoder with a lightweight fusion module that integrates features at multiple scales. Instead of predicting a mask independently, we integrate the HR and LR tokens at an intermediate layer to jointly learn features of the same object across multiple resolutions. Experiments show that our WSI-SAM outperforms state-of-the-art SAM and its variants. In particular, our model outperforms SAM by 4.1 and 2.5 percentage points on a ductal carcinoma in situ (DCIS) segmentation task and a breast cancer metastasis segmentation task (CAMELYON16 data set), respectively. The code will be available at https://github.com/HongLiuuuuu/WSI-SAM.}
}
@InProceedings{chen24,
title = {Benchmarking Embedding Aggregation Methods in Computational Pathology: A Clinical Data Perspective},
author = {Chen, Shengjia and Campanella, Gabriele and Elmas, Abdulkadir and Stock, Aryeh and Zeng, Jennifer and Polydorides, Alexandros D. and Schoenfeld, Adam J. and Huang, Kuan-lin and Houldsworth, Jane and Vanderbilt, Chad and Fuchs, Thomas J.},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {38--50},
abstract = {Recent advances in artificial intelligence (AI), in particular self-supervised learning of foundation models (FMs), are revolutionizing medical imaging and computational pathology (CPath). A constant challenge in the analysis of digital Whole Slide Images (WSIs) is the problem of aggregating tens of thousands of tile-level image embeddings to a slide-level representation. Due to the prevalent use of datasets created for genomic research, such as TCGA, for method development, the performance of these techniques on diagnostic slides from clinical practice has been inadequately explored. This study conducts a thorough benchmarking analysis of ten slide-level aggregation techniques across nine clinically relevant tasks, including diagnostic assessment, biomarker classification, and outcome prediction. The results yield the following key insights: (1) Embeddings derived from domain-specific (histological images) FMs outperform those from generic ImageNet-based models across aggregation methods. (2) Spatial-aware aggregators enhance the performance significantly when using ImageNet pre-trained models but not when using FMs. (3) No single model excels in all tasks, and spatially-aware models do not show general superiority as would be expected. These findings underscore the need for more adaptable and universally applicable aggregation techniques, guiding future research towards tools that better meet the evolving needs of clinical AI in pathology. The code used in this work is available at https://github.com/fuchs-lab-public/CPath_SABenchmark}
}
@InProceedings{fan24,
title = {CDNet: Causal Inference inspired Diversified Aggregation Convolution for Pathology Image Segmentation},
author = {Fan, Dawei and Gao, Yifan and Yu, Jiaming and Yang, Changcai and Chen, Riqing and Wei, Lifang},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {51--60},
abstract = {Deep learning models have shown promising performance for nuclei segmentation in the field of pathology image analysis. However, training a robust model from multiple domains remains a great challenge for nuclei segmentation. Additionally, background noise, heavy overlap between nuclei, and blurred edges often lead to poor performance. To address these challenges, we propose a novel framework termed CDNet, which combines a Causal Inference Module (CIM) with Diversified Aggregation Convolution (DAC) techniques. The DAC module incorporates diverse downsampling features through a simple, parameter-free attention module (SimAM), aiming to overcome the problem of edge blurring. Furthermore, we introduce CIM to leverage sample weighting by directly removing the spurious correlations between features for every input sample and concentrating more on the correlation between features and labels. Extensive experiments on the MoNuSeg and GLySAC datasets yielded promising results, with mean intersection over union (mIoU) and Dice similarity coefficient (DSC) scores increasing by 3.59\% and 2.61\%, and 2.71\% and 2.04\%, respectively, outperforming other state-of-the-art methods.}
}
@InProceedings{lucassen24,
title = {Preprocessing Pathology Reports for Vision-Language Model Development},
author = {Lucassen, Ruben T. and Luijtgaarden, Tijn van de and Moonemans, Sander P. J. and Blokx, Willeke A. M. and Veta, Mitko},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {61--71},
abstract = {Pathology reports are increasingly being used for development of vision-language models. Because the reports often include information that cannot directly be derived from paired images, careful selection of information is required to prevent hallucinations in tasks like report generation. In this paper, we present a language model for subsentence segmentation based on the information content, as part of a preprocessing workflow for 27,500 pathology reports of cutaneous melanocytic lesions. After initial clean up, the reports were first translated from Dutch to English and then segmented by separate language models. Both models were developed using an iterative approach, in which the development dataset was expanded with manually corrected model predictions for previously unannotated reports before finetuning the next version of the models. Over the course of eight iterations, the development dataset was in the end scaled up to 1,500 translated and annotated reports. On the independent test set of 3,597 sentences from 150 reports, 219 translation errors (6.1\%) of different severities were counted. The subsentence segmentation model achieved a strong predictive performance on the test set with a macro average F1-score of 0.921 (95\% CI, 0.890-0.940) and a weighted average F1-score of 0.952 (95\% CI, 0.944-0.960) over 13 different classes. The remaining 25,850 unannotated reports were translated and segmented using the final models to complete the dataset preprocessing. Differences in word count and class distribution between section types of the reports were explored in preparation for future vision-language modeling. The presented methodology is generic and can, therefore, easily be extended to multiple or different pathology domains beyond melanocytic skin lesions. Code and trained model parameters are made publicly available.}
}
@InProceedings{ahmed24,
title = {PathAlign: A vision–language model for whole slide images in histopathology},
author = {Ahmed, Faruk and Sellergen, Andrew and Yang, Lin and Xu, Shawn and Babenko, Boris and Ward, Abbi and Olson, Niels and Mohtashamian, Arash and Matias, Yossi and Corrado, Greg S. and Duong, Quang and Webster, Dale R. and Shetty, Shravya and Golden, Daniel and Liu, Yun and Steiner, David F. and Wulczyn, Ellery},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {72--108},
abstract = {Microscopic interpretation of histopathology images underlies many important diagnostic and treatment decisions. While advances in vision–language modeling raise new opportunities for analysis of such images, the gigapixel-scale size of whole slide images (WSIs) introduces unique challenges. Additionally, pathology reports simultaneously highlight key findings from small regions while also aggregating interpretation across multiple slides, often making it difficult to create robust image–text pairs. As such, pathology reports remain a largely untapped source of supervision in computational pathology, with most efforts relying on region-of-interest annotations or self-supervision at the patch-level. In this work, we develop a vision–language model based on the BLIP-2 framework using WSIs paired with curated text from pathology reports. This enables applications utilizing a shared image–text embedding space, such as text or image retrieval for finding cases of interest, as well as integration of the WSI encoder with a frozen large language model (LLM) for WSI-based generative text capabilities such as report generation or AI-in-the-loop interactions. We utilize a de-identified dataset of over 350,000 WSIs and diagnostic text pairs, spanning a wide range of diagnoses, procedure types, and tissue types. We present pathologist evaluation of text generation and text retrieval using WSI embeddings, as well as results for WSI classification and workflow prioritization (slide-level triaging). Model-generated text for WSIs was rated by pathologists as accurate, without clinically significant error or omission, for 78\% of WSIs on average. This work demonstrates exciting potential capabilities for language-aligned WSI embeddings.}
}
@InProceedings{ben-david24,
title = {Deep-Learning Based Virtual Stain Multiplexing Immunohistochemistry Slides – a Pilot Study},
author = {Ben-David, Oded and Arbel, Elad and Rabkin, Daniela and Remer, Itay and Ben-Dor, Amir and Aviel-Ronen, Sarit and Aidt, Frederik and Hagedorn-Olsen, Tine and Jacobsen, Lars and Kersch, Kristopher and Tsalenko, Anya},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {107--120},
abstract = {In this paper, we introduce a novel deep-learning based method for virtual stain multiplexing of immunohistochemistry (IHC) stains. Traditional IHC techniques generally involve a single stain that highlights a single target protein, but this can be enriched with stain multiplexing. Our proposed method leverages sequential staining to train a model to virtually stain multiplex additional IHC on top of a digitally scanned whole slide image (WSI), without requiring a complex setup or any additional tissue sections and stains. To this end, we designed a novel model architecture, guided by the physical sequential staining process, which provides superior performance. The model was optimized using a custom loss function that combines mean squared error (MSE) with semantic information, allowing the model to focus on learning the relevant differences between the input and ground truth. As an example application, we consider the problem of detecting macrophages on PD-L1 IHC 22C3 pharmDx NSCLC WSIs. We demonstrated virtual stain multiplexing of CD68 on top of PD-L1 22C3 pharmDx stained slides, which helps to detect macrophages and distinguish them from PD-L1+ tumor cells, which are often visually similar. Our pilot-study results showed significant improvement in a pathologist’s ability to distinguish macrophages when using the virtually stain multiplexed CD68 decision supporting layer.}
}
@InProceedings{nguyen24,
title = {ContriMix: Scalable stain color augmentation for domain generalization without domain labels in digital pathology},
author = {Nguyen, Tan H. and Juyal, Dinkar and Li, Jin and Prakash, Aaditya and Nofallah, Shima and Shah, Chintan and Gullapally, Sai Chowdary and Yu, Limin and Griffin, Michael and Sampat, Anand and Abel, John and Lee, Justin and Taylor-Weiner, Amaro},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {121--130},
abstract = {Differences in staining and imaging procedures can cause significant color variations in histopathology images, leading to poor generalization when deploying deep-learning models trained from a different data source. Various color augmentation methods have been proposed to generate synthetic images during training to make models more robust, eliminating the need for stain normalization during test time. Many color augmentation methods leverage domain labels to generate synthetic images. This approach causes three significant challenges to scaling such a model. Firstly, incorporating data from a new domain into deep-learning models trained on existing domain labels is not straightforward. Secondly, dependency on domain labels prevents the use of pathology images without domain labels to improve model performance. Finally, implementation of these methods becomes complicated when multiple domain labels (e.g., patient identification, medical center, etc.) are associated with a single image. We introduce ContriMix, a novel domain-label-free stain color augmentation method based on DRIT++, a style-transfer method. ContriMix leverages sample stain color variation within a training minibatch and random mixing to extract content and attribute information from pathology images. This information can be used by a trained ContriMix model to create synthetic images to improve the performance of existing classifiers. ContriMix outperforms competing methods on the Camelyon17-WILDS dataset. Its performance is consistent across different slides in the test set while being robust to the color variation from rare substances in pathology images. Our source code and pre-trained checkpoints are available at https://gitlab.com/huutan86/intraminibatch_permutation_drit.}
}
@InProceedings{naidoo24,
title = {SurvivMIL: A Multimodal, Multiple Instance Learning Pipeline for Survival Outcome of Neuroblastoma Patients},
author = {Naidoo, Reed and Fourkioti, Olga and Vries, Matt De and Bakal, Chris},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {131--141},
abstract = {Integrating Whole Slide Images (WSIs) and patient-specific health records (PHRs) can facilitate survival analysis of high-risk neuroblastoma (NB) cancer patients. However, this integration is challenging due to extreme differences in data dimensionality. Specifically, while PHRs are at the patient level and contain sparse information, WSIs are highly information-dense and processed at high resolution. Adjacent to this challenge, specifically in the context of survival analysis under the Multiple Instance Learning (MIL) framework, there are limitations with approximating the hazard function because of varying size WSIs and implicitly limited batch sizes. To address these challenges, we propose SURVIVMIL, a late fusion MIL model that integrates multimodal prognostic data for predicting NB patient outcomes. Our approach fuses predictions from both modalities and incorporates a novel concordance-based loss function via a specifically designed buffer branch, which mitigates the batch size limitation by accumulating survival predictions. Our model is evaluated on an in-house pediatric NB patient dataset, providing insights into the contributions of each modality to predictive performance. The code will be available at: https://github.com/reednaidoo/SurvivMIL_COMPAYL.git}
}
@InProceedings{innani24,
title = {Multi-scale Whole Slide Image Assessment Improves Deep Learning based WHO 2021 Glioma Classification},
author = {Innani, Shubham and Nasrallah, MacLean P. and Bell, W. Robert and Baheti, Bhakti and Bakas, Spyridon},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {142--153},
abstract = {The 2021 WHO classification of tumors of the central nervous system necessitates the integration of molecular and histologic profiling for a conclusive diagnosis of glioma. Molecular profiling is time-consuming and may not always be available. We hypothesize that subvisual cues in whole slide images (WSI), not perceivable by the naked eye, carry a predictive value of molecular characteristics and can allow categorization of the adult infiltrative gliomas in one of three major types: i) oligodendroglioma, ii) astrocytoma, and iii) glioblastoma. Towards this end, we present a computational pipeline comprising patch analysis of Hematoxylin and Eosin (H\&E)-stained WSIs, feature encoding with ImageNet pretrained ResNet50, and an attention-based multiple instance learning paradigm. We trained individual models at four distinct magnification levels (20x, 10x, 5x, 2.5x), and assessed the fusion of various ensemble combinations to mimic the WSI assessment by expert pathologists, to capture local and global context. Our results using a multi-scale approach demonstrate 3-9\% improvement in classification accuracy when compared with models utilising a single magnification level. This advancement underscores the efficacy of attention-based models combined with multi-scale approaches in augmenting traditional assessment of WSIs. The implications of our findings are significant in enhancing glioma diagnosis and treatment planning in neuro-oncology, by enabling diagnostics in low-resource environments where molecular profiling is not available.}
}
@InProceedings{saada24,
title = {CARMIL: Context-Aware Regularization on Multiple Instance Learning models for Whole Slide Images},
author = {Saada, Thiziri Nait and Di-Proietto, Valentina and Schmauch, Benoit and Loga, Katharina Von and Fidon, Lucas},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {154--169},
abstract = {Multiple Instance Learning (MIL) models have proven effective for cancer prognosis from Whole Slide Images. However, the original MIL formulation incorrectly assumes the patches of the same image to be independent, leading to a loss of spatial context as information flows through the network. Incorporating contextual knowledge into predictions is particularly important given the inclination for cancerous cells to form clusters and the presence of spatial indicators for tumors. State-of-the-art methods often use attention mechanisms, sometimes combined with graphs, to capture spatial knowledge. In this paper, we take a novel and transversal approach, addressing this issue through the lens of regularization. We propose Context-Aware Regularization for Multiple Instance Learning (CARMIL), a versatile regularization scheme designed to seamlessly integrate spatial knowledge into any MIL model. Additionally, we present a new and generic metric to quantify the Context-Awareness of any MIL model when applied to Whole Slide Images, resolving a previously unexplored gap in the field. The efficacy of our framework is evaluated for two survival analysis tasks on glioblastoma (TCGA GBM) and colon cancer data (TCGA COAD).}
}
@InProceedings{robbins24,
title = {Prediction of {KRAS} mutation status from H\&E foundation model embeddings in non-small cell lung cancer},
author = {Robbins, Marc and Loo, Jessica and Vyawahare, Saurabh and Wang, Yang Von and Mcneil, Carson and Steiner, Dave and Rao, Sudha and Wong, Pok Fai and Rivlin, Ehud and Weaver, Shamira and Goldenberg, Roman},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {170--179},
abstract = {We predicted KRAS mutation status on non-small cell lung cancer (NSCLC) H\&E images from foundation model embeddings. We evaluated a variety of attention-based multiple instance learning (MIL) models and aggregation strategies for a tilewise linear classifier. MIL with self-attention performed the best (AUC=0.822), followed by the minimum over tiles classified with the linear model (AUC=0.810). Self-attention was necessary for MIL to surpass tilewise linear classification when a wide range of aggregation techniques was considered.}
}
@InProceedings{abdo24,
title = {StairwayToStain: A Gradual Stain Translation Approach for Glomeruli Segmentation},
author = {Abdo, Ali Alhaj and Mhiri, Islem and Nisar, Zeeshan and Seeliger, Barbara and Lampert, Thomas},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {180--191},
abstract = {Image-to-image translation (I2I) has advanced digital pathology by enabling knowledge transfer across clinical contexts through unsupervised domain adaptation (UDA). Although promising, most I2I frameworks transfer source-labeled data to target unlabeled data directly in a one-off way. However, translating stains from information-poor domains to information-rich ones can lead to a domain shift problem due to the large discrepancy between domains. To address this issue, we propose StairwayToStain (STS), an unsupervised gradual stain translation framework that uses intermediate stains to bridge the gap between the source and target stain. Our method is grounded in three main phases: (i) measuring the domain shift between different stains, (ii) defining a translation path, and (iii) performing the gradual stain translation. Our method demonstrates its efficacy in improving glomeruli segmentation when translating from immunohistochemical (IHC) to histochemical stains, as well as between different IHC stains. Comprehensive experiments on stain translation demonstrate STS’s competitive results compared to its variants and state-of-the-art direct I2I methods in achieving UDA. Moreover, we are able to generate additional stains during the translation process. Our method presents the first framework for gradual domain adaptation in stain translation.}
}
@InProceedings{spyretos24,
title = {Early Fusion of H\&E and IHC Histology Images for Pediatric Brain Tumor Classification},
author = {Spyretos, Christoforos and Tampu, Iulian Emil and Khalili, Nadieh and Ladino, Juan Manuel Pardo and Nyman, Per and Blystad, Ida and Eklund, Anders and Haj-Hosseini, Neda},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {192--202},
abstract = {This study explores the application of computational pathology to analyze pediatric brain tumors utilizing hematoxylin and eosin (H\&E) and immunohistochemistry (IHC) whole slide images (WSIs). Experiments were conducted on H\&E images for predicting tumor diagnosis and fusing them with unregistered IHC images to investigate potential improvements. Patch features were extracted using UNI, a vision transformer (ViT) model trained on H\&E data, and whole slide classification was achieved using the attention-based multiple instance learning CLAM framework. In the astrocytoma tumor classification, early fusion of the H\&E and IHC significantly improved the differentiation between tumor grades (balanced accuracy: 0.82 ± 0.05 vs 0.84 ± 0.05). In the multiclass classification, H\&E images alone had a balanced accuracy of 0.79 ± 0.03, without any improvement obtained when fused with IHC. The findings highlight the potential of using multi-stain fusion to advance the diagnosis of pediatric brain tumors; however, further fusion methods should be investigated.}
}
@InProceedings{polejowska24,
title = {Histopathobiome – integrating histopathology and microbiome data via multimodal deep learning},
author = {Polejowska, Agata and Boleij, Annemarie and Ciompi, Francesco},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {203--213},
abstract = {We introduce Histopathobiome, a term representing the integration of histopathology and microbiome data to explore tissue-microbe interactions. Using a dataset of colon biopsy whole-slide images paired with microbiota composition samples, we assess the benefits of combining these modalities to distinguish patients with inflammatory bowel disease (IBD) subtype – ulcerative colitis (UC) from non-IBD controls. Initially, we evaluate the unimodal performance of state-of-the-art algorithms using vectors representing bacterial species abundances or histopathology slide-level embeddings. We compare single-modality models with bimodal networks with various fusion strategies. Our results prove that histopathology and microbiome data are complementary in UC classification. By demonstrating improved performance over single-modality approaches, we prove that bimodal deep learning models can be used to learn meaningful and interpretable cross-modal tissue-microbe patterns.}
}
@InProceedings{pozzi24,
title = {Scoring Tumor-Infiltrating Lymphocytes in breast DCIS: A guideline-driven artificial intelligence approach},
author = {Pozzi, Matteo and Klubickova, Natalie and Campora, Michela and Meeuwsen, Frederique and Spronck, Joey and Lems, Carlijn and Stegeman, Michelle and Tessier, Leslie and Barbareschi, Mattia and Laak, Jeroen van der and Jurman, Giuseppe and Ciompi, Francesco},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {214--225},
abstract = {This study focuses on the assessment of Tumor-Infiltrating Lymphocytes (TILs) in breast ductal carcinoma in situ (DCIS) by integrating artificial intelligence with international guidelines. DCIS is a non-invasive cancer with intrinsic potential to evolve to invasive breast cancer (IBC), making it critical to understand factors influencing this progression. TILs are a prognostic biomarker in IBC, but their role in DCIS remains under-explored. This work proposes an automated pipeline for computing TILs scores using deep learning for DCIS segmentation and TILs detection, following the guidelines of the International Immuno-Oncology Biomarker Working Group. We report the inter-observer variability at TILs scoring among pathologists and show that the AI-based TILs scores have good concordance with human assessments. Future research will aim to reduce false positives in DCIS segmentation and detection, support the reference standard with immunohistochemical staining, and expand the dataset to enhance the robustness of the TILs detection algorithm. Ultimately, this method aims to aid pathologists in assessing the risk associated with DCIS lesions.}
}
@InProceedings{shi24,
title = {Upscaling Prostate Cancer MRI Images to Cell-level Resolution Using Self-supervised Learning},
author = {Shi, Yaying and Das, Srijan and Yan, Yonghong},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {226--236},
abstract = {Magnetic Resonance Imaging (MRI) plays a pivotal role in medical imaging, particularly in the diagnosis and treatment of cancers via radiography. However, one of the limitations of MRI is its low spatial resolution, which can hinder the accurate detection and characterization of cancerous lesions, especially those that are small or subtle in nature. There is a growing need for advancements in MRI technology to improve the resolution of MRI, particularly in the field of oncology, where precise detection and segmentation of tumors are crucial for effective treatment planning and optimal patient outcomes. In this paper, we propose a self-supervised deep learning technique to upscale cancer MRI images to cell-level resolution with pathology Whole Slide Imaging (WSI). By integrating information from pathology WSIs with MRI images, this approach aims to create hybrid images that offer a more detailed and comprehensive view of cancer tissue structures. We evaluated our technique using prostate lesions on both similarity metrics and downstream segmentation tasks. For similarity, our reconstructed fusion images achieve an average structural similarity index of 0.933. We improved the lesion segmentation Dice score from 57.3\% to 64.0\% on the test cases. Such fusion of the two imaging modalities shows promise for improving the accuracy and reliability of cancer diagnosis, guiding treatment decisions, and ultimately improving patient outcomes.}
}
@InProceedings{montalvo-garcia24,
title = {Stromal Tissue Segmentation in Multi-Stained Serial Histopathological Sections of Pancreatic Tumors},
author = {Montalvo-Garc{\'i}a, David and Ortu{\~n}o, Juan E. and Ramos-Guerra, Ana D. and Granados-Aparici, Sof{\'i}a and Goswami, Subhra S. and Diaz, Pablo Santiago and Patriarca-Amiano, Maria Evangelina and Gros, Joan Lop and Estudillo, Lidia and Coma, Mar Iglesias and Noguera, Rosa and Malats, Nuria and Ledesma-Carbayo, Mar{\'i}a J.},
booktitle = {Proceedings of the MICCAI Workshop on Computational Pathology},
pages = {237--248},
abstract = {In this work we propose and compare different deep learning algorithms for the segmentation of stromal regions in pancreatic histopathological images using three consecutive tissue sections, each uniquely stained with Hematoxylin and Eosin (H\&E), Masson’s Trichrome, and Alcian Blue. After a non-rigid registration process, variations in tissue distribution between consecutive slides still persist, which leads to distinct desired segmentations of tissues for each stain, thus underscoring the need for specific segmentation and co-segmentation approaches to achieve higher accuracy. We compare single-stain models with multi-stain techniques that either consider the multiple stains all at once in training or are based on multi-branch siamese and co-segmentation techniques. We demonstrate superior performance in identifying stromal regions with the multi-stain approaches in comparison to the segmentation techniques applied to individual stains, by effectively utilizing the complementary information each staining technique provides. This advancement is poised to enhance the further evaluation of tumor microenvironment and stromal characteristics in patients with pancreatic cancer.}
}