[ { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2009_paper.pdf", "bibtext":"@InProceedings{ Zha_Disease_MICCAI2024,\n author = { Zhang, Jin and Shang, Muheng and Yang, Yan and Guo, Lei and Han, Junwei and Du, Lei },\n title = { { Disease Progression Prediction Incorporating Genotype-Environment Interactions: A Longitudinal Neurodegenerative Disorder Study } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Disease progression prediction is a fundamental yet challenging task in neurodegenerative disorders. Despite extensive research endeavors, disease progression fitting on brain imaging data alone may yield suboptimal performance due to the effect of potential interactions between genetic variations, proteomic expressions, and environmental exposures on the disease progression. To fill this gap, we draw on the idea of mutual-assistance (MA) learning and accordingly propose a fresh and powerful scheme, referred to as Mutual-Assistance Disease Progression fitting and Genotype-by-Environment interaction identification approach (MA-DPxGE). Specifically, our model jointly performs disease progression fitting using longitudinal imaging phenotypes and identification of genotype-by-environment interaction factors. To ensure stability and interpretability, we employ innovative penalties to discern significant risk factors. Moreover, we meticulously design adaptive mechanisms for loss-term reweighting, ensuring fair adjustments for each prediction task. Furthermore, due to high-dimensional genotype-by-environment interactions, we devise a rapid and efficient strategy to reduce runtime, ensuring practical availability and applicability. Experimental results on the Alzheimer\u2019s Disease Neuroimaging Initiative (ADNI) dataset reveal that MA-DPxGE demonstrates superior performance compared to state-of-the-art approaches while maintaining exceptional interpretability. 
This outcome is pivotal in elucidating disease progression patterns and establishing effective strategies to mitigate or halt disease advancement.", "title":"Disease Progression Prediction Incorporating Genotype-Environment Interactions: A Longitudinal Neurodegenerative Disorder Study", "authors":[ "Zhang, Jin", "Shang, Muheng", "Yang, Yan", "Guo, Lei", "Han, Junwei", "Du, Lei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":0 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1241_paper.pdf", "bibtext":"@InProceedings{ Jeo_BrainWaveNet_MICCAI2024,\n author = { Jeong, Ah-Yeong and Heo, Da-Woon and Kang, Eunsong and Suk, Heung-Il },\n title = { { BrainWaveNet: Wavelet-based Transformer for Autism Spectrum Disorder Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The diagnosis of Autism Spectrum Disorder (ASD) using resting-state functional Magnetic Resonance Imaging (rs-fMRI) is commonly analyzed through functional connectivity (FC) between Regions of Interest (ROIs) in the time domain. However, the time domain has limitations in capturing global information. To overcome this problem, we propose a wavelet-based Transformer, BrainWaveNet, that leverages the frequency domain and learns spatial-temporal information for rs-fMRI brain diagnosis. Specifically, BrainWaveNet learns inter-relations between two different frequency-based features (real and imaginary parts) by cross-attention mechanisms, which allows for a deeper exploration of ASD. In our experiments using the ABIDE dataset, we validated the superiority of BrainWaveNet by comparing it with competing deep learning methods. 
Furthermore, we analyzed significant regions of ASD for neurological interpretation.", "title":"BrainWaveNet: Wavelet-based Transformer for Autism Spectrum Disorder Diagnosis", "authors":[ "Jeong, Ah-Yeong", "Heo, Da-Woon", "Kang, Eunsong", "Suk, Heung-Il" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ku-milab\/BrainWaveNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":1 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2115_paper.pdf", "bibtext":"@InProceedings{ Kho_Unified_MICCAI2024,\n author = { Khor, Hee Guan and Yang, Xin and Sun, Yihua and Wang, Jie and Huang, Sijuan and Wang, Shaobin and Lu, Bai and Ma, Longfei and Liao, Hongen },\n title = { { Unified Prompt-Visual Interactive Segmentation of Clinical Target Volume in CT for Nasopharyngeal Carcinoma with Prior Anatomical Information } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"The delineation of the Clinical Target Volume (CTV) is a crucial step in the radiotherapy (RT) planning process for patients with nasopharyngeal carcinoma (NPC). However, manual delineation is labor-intensive, and automatic CTV contouring for NPC is difficult due to the nasopharyngeal complexity, tumor variability, and judgement-based criteria. To address the above-mentioned problems, we introduce SAM-RT, the first large vision model (LVM) designed for CTV contouring in NPC. Given the anatomical dependency required for CTV contouring\u2014which encapsulates the Gross Tumor Volume (GTV) while minimizing exposure to Organs-at-Risk (OAR)\u2014our approach begins with the fine-tuning of the Segment Anything Model (SAM), using a Low-Rank Adaptation (LoRA) strategy for segmenting GTV and OAR across multi-center and multi-modality datasets. This step ensures SAM-RT initially integrates with anatomical prior knowledge for CTV contouring. To optimize the use of previously acquired knowledge, we introduce Sequential LoRA (SeqLoRA) to improve knowledge retention in SAM-RT during the fine-tuning for CTV contouring. We further introduce the Prompt-Visual Cross Merging Attention (ProViCMA) for enhanced image and prompt interaction, and the Gate-Regulated Prompt Adjustment (GaRPA) strategy, utilizing learnable gates to direct prompts for effective CTV task adaptation. Efficient utilization of knowledge across relevant datasets is essential due to sparse labeling of medical images for specific tasks. To achieve this, SAM-RT is trained using an information-querying approach. SAM-RT incorporates various prior knowledge: 1) Reliance of CTV on GTV and OAR, and 2) Eliciting expert knowledge in CTV contouring. 
Extensive quantitative and qualitative experiments validate our designs.", "title":"Unified Prompt-Visual Interactive Segmentation of Clinical Target Volume in CT for Nasopharyngeal Carcinoma with Prior Anatomical Information", "authors":[ "Khor, Hee Guan", "Yang, Xin", "Sun, Yihua", "Wang, Jie", "Huang, Sijuan", "Wang, Shaobin", "Lu, Bai", "Ma, Longfei", "Liao, Hongen" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":2 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3619_paper.pdf", "bibtext":"@InProceedings{ Che_Medical_MICCAI2024,\n author = { Chen, Wenting and Wang, Pengyu and Ren, Hui and Sun, Lichao and Li, Quanzheng and Yuan, Yixuan and Li, Xiang },\n title = { { Medical Image Synthesis via Fine-Grained Image-Text Alignment and Anatomy-Pathology Prompting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Data scarcity and privacy concerns limit the availability of high-quality medical images for public use, which can be mitigated through medical image synthesis. However, current medical image synthesis methods often struggle to accurately capture the complexity of detailed anatomical structures and pathological conditions. To address these challenges, we propose a novel medical image synthesis model that leverages fine-grained image-text alignment and anatomy-pathology prompts to generate highly detailed and accurate synthetic medical images. Our methodology integrates advanced natural language processing techniques with image generative modeling, enabling precise alignment between descriptive text prompts and the synthesized images\u2019 anatomical and pathological details. The proposed approach consists of two key components: an anatomy-pathology prompting module and a fine-grained alignment-based synthesis module. The anatomy-pathology prompting module automatically generates descriptive prompts for high-quality medical images. To further synthesize high-quality medical images from the generated prompts, the fine-grained alignment-based synthesis module pre-defines a visual codebook for the radiology dataset and performs fine-grained alignment between the codebook and generated prompts to obtain key patches as visual clues, facilitating accurate image synthesis. 
We validate the superiority of our method through experiments on public chest X-ray datasets and demonstrate that our synthetic images preserve accurate semantic information, making them valuable for various medical applications.", "title":"Medical Image Synthesis via Fine-Grained Image-Text Alignment and Anatomy-Pathology Prompting", "authors":[ "Chen, Wenting", "Wang, Pengyu", "Ren, Hui", "Sun, Lichao", "Li, Quanzheng", "Yuan, Yixuan", "Li, Xiang" ], "id":"Conference", "arxiv_id":"2403.06835", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":3 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0453_paper.pdf", "bibtext":"@InProceedings{ Wen_Biophysicsbased_MICCAI2024,\n author = { Wen, Zheyu and Ghafouri, Ali and Biros, George },\n title = { { Biophysics-based data assimilation of longitudinal tau and amyloid-\u03b2 PET scans } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Misfolded tau and amyloid-beta (Abeta) are hallmark proteins of Alzheimer\u2019s Disease (AD). Due to their clinical significance, rich datasets that track their temporal evolution have been created. For example, ADNI has hundreds of subjects with PET imaging of both these two proteins. Interpreting and combining this data beyond statistical correlations remains a challenge. Biophysical models offer a complementary avenue to assimilating such complex data and eventually helping us better understand disease progression. To this end, we introduce a mathematical model that tracks the dynamics of four species (normal and abnormal tau and Abeta) and uses a graph to approximate their spatial coupling. The graph nodes represent gray matter regions of interest (ROI), and the edges represent tractography-based connectivity between ROIs. We model interspecies interactions, migration, proliferation, and clearance. Our biophysical model has seven unknown scalar parameters plus unknown initial conditions for tau and Abeta. Using imaging scans, we can calibrate these parameters by solving an inverse problem. The scans comprise longitudinal tau and Abeta PET scans, along with MRI for subject specific anatomy. We propose an inversion algorithm that stably reconstructs the unknown parameters. We verify and test its numerical stability in the presence of noise using synthetic data. We discovered that the inversion is more stable when using multiple scans. Finally, we apply the overall methodology on 334 subjects from the ADNI dataset and compare it to a commonly used tau-only model calibrated by a single PET scan. We report the R2 and relative fitting error metrics. 
The proposed method achieves R2 = 0.82 compared to R2 = 0.64 of the tau-only single-scan reconstruction.", "title":"Biophysics-based data assimilation of longitudinal tau and amyloid-\u03b2 PET scans", "authors":[ "Wen, Zheyu", "Ghafouri, Ali", "Biros, George" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":4 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0336_paper.pdf", "bibtext":"@InProceedings{ Xue_WSSADN_MICCAI2024,\n author = { Xue, Pengcheng and Nie, Dong and Zhu, Meijiao and Yang, Ming and Zhang, Han and Zhang, Daoqiang and Wen, Xuyun },\n title = { { WSSADN: A Weakly Supervised Spherical Age-Disentanglement Network for Detecting Developmental Disorders with Structural MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Structural magnetic resonance imaging characterizes the morphology and anatomical features of the brain and has been widely utilized in the diagnosis of developmental disorders. Given the dynamic nature of developmental disorder progression with age, existing methods for disease detection have incorporated age as either prior knowledge to be integrated or as a confounding factor to be disentangled through supervised learning. However, the excessive focus on age information in these methods restricts their capability to unearth disease-related features, thereby affecting the subsequent disease detection performance. To address this issue, this work introduces a novel weakly supervised learning-based method, namely, the Weakly Supervised Spherical Age Disentanglement Network (WSSADN). WSSADN innovatively combines an attention-based disentangler with the Conditional Generative Adversarial Network (CGAN) to remove normal developmental information from the brain representation of the patient with developmental disorder in a weakly supervised manner. By reducing the focus on age information during the disentanglement process, the effectiveness of the extracted disease-related features is enhanced, thereby increasing the accuracy of downstream disease identification. Moreover, to ensure effective convergence of the disentanglement and age information learning modules, we design a consistency regularization loss to align the age-related features generated by the disentangler and CGAN. We evaluated our method on three different tasks, including the detection of preterm neonates, infants with congenital heart disease, and autism spectrum disorders. 
The experimental results demonstrate that our method significantly outperforms existing state-of-the-art methods across all tasks.", "title":"WSSADN: A Weakly Supervised Spherical Age-Disentanglement Network for Detecting Developmental Disorders with Structural MRI", "authors":[ "Xue, Pengcheng", "Nie, Dong", "Zhu, Meijiao", "Yang, Ming", "Zhang, Han", "Zhang, Daoqiang", "Wen, Xuyun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xuepengcheng1231\/WSSADN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":5 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0867_paper.pdf", "bibtext":"@InProceedings{ Cho_MedFormer_MICCAI2024,\n author = { Chowdary, G. Jignesh and Yin, Zhaozheng },\n title = { { Med-Former: A Transformer based Architecture for Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"In recent years, transformer-based image classification methods have demonstrated remarkable effectiveness across various image classification tasks. However, their application to medical images presents challenges, especially in the feature extraction capability of the network. Additionally, these models often struggle with the efficient propagation of essential information throughout the network, hindering their performance in medical imaging tasks. To overcome these challenges, we introduce a novel framework comprising Local-Global Transformer module and Spatial Attention Fusion module, collectively referred to as Med-Former. These modules are specifically designed to enhance the feature extraction capability at both local and global levels and improve the propagation of vital information within the network. To evaluate the efficacy of our proposed Med-Former framework, we conducted experiments on three publicly available medical image datasets: NIH Chest X-ray14, DermaMNIST, and BloodMNIST. Our results demonstrate that Med-Former outperforms state-of-the-art approaches underscoring its superior generalization capability and effectiveness in medical image classification.", "title":"Med-Former: A Transformer based Architecture for Medical Image Classification", "authors":[ "Chowdary, G. 
Jignesh", "Yin, Zhaozheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":6 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0556_paper.pdf", "bibtext":"@InProceedings{ Ye_Enabling_MICCAI2024,\n author = { Ye, Shuchang and Meng, Mingyuan and Li, Mingjian and Feng, Dagan and Kim, Jinman },\n title = { { Enabling Text-free Inference in Language-guided Segmentation of Chest X-rays via Self-guidance } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Segmentation of infected areas in chest X-rays is pivotal for facilitating the accurate delineation of pulmonary structures and pathological anomalies. Recently, multi-modal language-guided image segmentation methods have emerged as a promising solution for chest X-rays where the clinical text reports, depicting the assessment of the images, are used as guidance. Nevertheless, existing language-guided methods require clinical reports alongside the images, and hence, they are not applicable for use in image segmentation in a decision support context, but rather limited to retrospective image analysis after clinical reporting has been completed. In this study, we propose a self-guided segmentation framework (SGSeg) that leverages language guidance for training (multi-modal) while enabling text-free inference (uni-modal), which is the first that enables text-free inference in language-guided segmentation. We exploit the critical location information of both pulmonary and pathological structures depicted in the text reports and introduce a novel localization-enhanced report generation (LERG) module to generate clinical reports for self-guidance. Our LERG integrates an object detector and a location-based attention aggregator, weakly-supervised by a location-aware pseudo-label extraction module. 
Extensive experiments on the well-benchmarked QaTa-COV19 dataset demonstrate that our SGSeg achieved superior performance compared to existing uni-modal segmentation methods and closely matched the state-of-the-art performance of multi-modal language-guided segmentation methods.", "title":"Enabling Text-free Inference in Language-guided Segmentation of Chest X-rays via Self-guidance", "authors":[ "Ye, Shuchang", "Meng, Mingyuan", "Li, Mingjian", "Feng, Dagan", "Kim, Jinman" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ShuchangYe-bib\/SGSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":7 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0531_paper.pdf", "bibtext":"@InProceedings{ Cho_SliceConsistent_MICCAI2024,\n author = { Choo, Kyobin and Jun, Youngjun and Yun, Mijin and Hwang, Seong Jae },\n title = { { Slice-Consistent 3D Volumetric Brain CT-to-MRI Translation with 2D Brownian Bridge Diffusion Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"In neuroimaging, brain CT is generally a more cost-effective and accessible imaging option compared to MRI. Nevertheless, CT exhibits inferior soft-tissue contrast and higher noise levels, yielding less precise structural clarity. In response, leveraging more readily available CT to construct its counterpart MRI, namely, medical image-to-image translation (I2I), serves as a promising solution. Particularly, while diffusion models (DMs) have recently risen as a powerhouse, they also come with a few practical caveats for medical I2I. First, DMs\u2019 inherent stochasticity from random noise sampling cannot guarantee consistent MRI generation that faithfully reflects its CT. Second, for 3D volumetric images which are prevalent in medical imaging, naively using 2D DMs leads to slice inconsistency, e.g., abnormal structural and brightness changes. While 3D DMs do exist, significant training costs and data dependency bring hesitation. As a solution, we propose novel style key conditioning (SKC) and inter-slice trajectory alignment (ISTA) sampling for the 2D Brownian bridge diffusion model. Specifically, SKC ensures a consistent imaging style (e.g., contrast) across slices, and ISTA interconnects the independent sampling of each slice, deterministically achieving style and shape consistent 3D CT-to-MRI translation. To the best of our knowledge, this study is the first to achieve high-quality 3D medical I2I based only on a 2D DM with no extra architectural models. 
Our experimental results show superior 3D medical I2I compared to existing 2D and 3D baselines, using an in-house CT-MRI dataset and the BraTS2023 FLAIR-T1 MRI dataset.", "title":"Slice-Consistent 3D Volumetric Brain CT-to-MRI Translation with 2D Brownian Bridge Diffusion Model", "authors":[ "Choo, Kyobin", "Jun, Youngjun", "Yun, Mijin", "Hwang, Seong Jae" ], "id":"Conference", "arxiv_id":"2407.05059", "GitHub":[ "https:\/\/github.com\/MICV-yonsei\/CT2MRI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":8 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1741_paper.pdf", "bibtext":"@InProceedings{ Zha_DSCENet_MICCAI2024,\n author = { Zhang, Yuan and Qi, Yaolei and Qi, Xiaoming and Wei, Yongyue and Yang, Guanyu },\n title = { { DSCENet: Dynamic Screening and Clinical-Enhanced Multimodal Fusion for MPNs Subtype Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"The precise subtype classification of myeloproliferative neoplasms (MPNs) based on multimodal information, which assists clinicians in diagnosis and long-term treatment plans, is of great clinical significance. However, it remains a highly challenging task due to the lack of diagnostic representativeness for local patches and the absence of diagnostic-relevant features from a single modality. In this paper, we propose a Dynamic Screening and Clinical-Enhanced Network (DSCENet) for the subtype classification of MPNs on the multimodal fusion of whole slide images (WSIs) and clinical information. (1) A dynamic screening module is proposed to flexibly adapt the feature learning of local patches, reducing the interference of irrelevant features and enhancing their diagnostic representativeness. (2) A clinical-enhanced fusion module is proposed to integrate clinical indicators to explore complementary features across modalities, providing comprehensive diagnostic information. Our approach has been validated on real clinical data, achieving an increase of 7.91% AUC and 16.89% accuracy compared with the previous state-of-the-art (SOTA) methods. 
The code is available at https:\/\/github.com\/yuanzhang7\/DSCENet.", "title":"DSCENet: Dynamic Screening and Clinical-Enhanced Multimodal Fusion for MPNs Subtype Classification", "authors":[ "Zhang, Yuan", "Qi, Yaolei", "Qi, Xiaoming", "Wei, Yongyue", "Yang, Guanyu" ], "id":"Conference", "arxiv_id":"2407.08167", "GitHub":[ "https:\/\/github.com\/yuanzhang7\/DSCENet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":9 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0263_paper.pdf", "bibtext":"@InProceedings{ Zeg_LaTiM_MICCAI2024,\n author = { Zeghlache, Rachid and Conze, Pierre-Henri and El Habib Daho, Mostafa and Li, Yihao and Le Boit\u00e9, Hugo and Tadayoni, Ramin and Massin, Pascale and Cochener, B\u00e9atrice and Rezaei, Alireza and Brahim, Ikram and Quellec, Gwenol\u00e9 and Lamard, Mathieu },\n title = { { LaTiM: Longitudinal representation learning in continuous-time models to predict disease progression } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"This work proposes a novel framework for analyzing disease progression using time-aware neural ordinary differential equations (NODE). We introduce a \u201ctime-aware head\u201d in a framework trained through self-supervised learning (SSL) to leverage temporal information in latent space for data augmentation. This approach effectively integrates NODEs with SSL, offering significant performance improvements compared to traditional methods that lack explicit temporal integration. We demonstrate the effectiveness of our strategy for diabetic retinopathy progression prediction using the OPHDIAT database. Compared to the baseline, all NODE architectures achieve statistically significant improvements in area under the ROC curve (AUC) and Kappa metrics, highlighting the efficacy of pre-training with SSL-inspired approaches. 
Additionally, our framework promotes stable training for NODEs, a commonly encountered challenge in time-aware modeling.", "title":"LaTiM: Longitudinal representation learning in continuous-time models to predict disease progression", "authors":[ "Zeghlache, Rachid", "Conze, Pierre-Henri", "El Habib Daho, Mostafa", "Li, Yihao", "Le Boit\u00e9, Hugo", "Tadayoni, Ramin", "Massin, Pascale", "Cochener, B\u00e9atrice", "Rezaei, Alireza", "Brahim, Ikram", "Quellec, Gwenol\u00e9", "Lamard, Mathieu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":10 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0219_paper.pdf", "bibtext":"@InProceedings{ Zhe_ABayesian_MICCAI2024,\n author = { Zheng, Zhou and Hayashi, Yuichiro and Oda, Masahiro and Kitasaka, Takayuki and Mori, Kensaku },\n title = { { A Bayesian Approach to Weakly-supervised Laparoscopic Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper, we study weakly-supervised laparoscopic image segmentation with sparse annotations. We introduce a novel Bayesian deep learning approach designed to enhance both the accuracy and interpretability of the model\u2019s segmentation, founded upon a comprehensive Bayesian framework, ensuring a robust and theoretically validated method. Our approach diverges from conventional methods that directly train using observed images and their corresponding weak annotations. Instead, we estimate the joint distribution of both images and labels given the acquired data. This facilitates the sampling of images and their high-quality pseudo-labels, enabling the training of a generalizable segmentation model. Each component of our model is expressed through probabilistic formulations, providing a coherent and interpretable structure. This probabilistic nature benefits accurate and practical learning from sparse annotations and equips our model with the ability to quantify uncertainty. Extensive evaluations with two public laparoscopic datasets demonstrated the efficacy of our method, which consistently outperformed existing methods. Furthermore, our method was adapted for scribble-supervised cardiac multi-structure segmentation, presenting competitive performance compared to previous methods. 
The code is available at https:\/\/github.com\/MoriLabNU\/Bayesian_WSS.", "title":"A Bayesian Approach to Weakly-supervised Laparoscopic Image Segmentation", "authors":[ "Zheng, Zhou", "Hayashi, Yuichiro", "Oda, Masahiro", "Kitasaka, Takayuki", "Mori, Kensaku" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MoriLabNU\/Bayesian_WSS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":11 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0470_paper.pdf", "bibtext":"@InProceedings{ Li_Endora_MICCAI2024,\n author = { Li, Chenxin and Liu, Hengyu and Liu, Yifan and Feng, Brandon Y. and Li, Wuyang and Liu, Xinyu and Chen, Zhen and Shao, Jing and Yuan, Yixuan },\n title = { { Endora: Video Generation Models as Endoscopy Simulators } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Generative models hold promise for revolutionizing medical education, robot-assisted surgery, and data augmentation for machine learning. Despite progress in generating 2D medical images, the complex domain of clinical video generation has largely remained untapped. This paper introduces Endora, an innovative approach to generate medical videos to simulate clinical endoscopy scenes. We present a novel generative model design that integrates a meticulously crafted video transformer with advanced 2D vision foundation model priors, explicitly modeling spatial-temporal dynamics during video generation. We also pioneer the first public benchmark for endoscopy simulation with video generation models, adapting existing state-of-the-art methods for this endeavor. Endora demonstrates exceptional visual quality in generating endoscopy videos, surpassing state-of-the-art methods in extensive testing. Moreover, we explore how this endoscopy simulator can empower downstream video analysis tasks and even generate 3D medical scenes with multi-view consistency. In a nutshell, Endora marks a notable breakthrough in the deployment of generative AI for clinical endoscopy research, setting a substantial stage for further advances in medical content generation. Project page: https:\/\/endora-medvidgen.github.io\/.", "title":"Endora: Video Generation Models as Endoscopy Simulators", "authors":[ "Li, Chenxin", "Liu, Hengyu", "Liu, Yifan", "Feng, Brandon Y.", "Li, Wuyang", "Liu, Xinyu", "Chen, Zhen", "Shao, Jing", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":12 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2441_paper.pdf", "bibtext":"@InProceedings{ Tei_Towards_MICCAI2024,\n author = { Teichmann, Marvin Tom and Datar, Manasi and Kratzke, Lisa and Vega, Fernando and Ghesu, Florin C. 
},\n title = { { Towards Integrating Epistemic Uncertainty Estimation into the Radiotherapy Workflow } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"The precision of contouring target structures and organs-at-risk (OAR) in radiotherapy planning is crucial for ensuring treatment efficacy and patient safety. Recent advancements in deep learning (DL) have significantly improved OAR contouring performance, yet the reliability of these models, especially in the presence of out-of-distribution (OOD) scenarios, remains a concern in clinical settings. This application study explores the integration of epistemic uncertainty estimation within the OAR contouring workflow to enable OOD detection in clinically relevant scenarios, using specifically compiled data. Furthermore, we introduce an advanced statistical method for OOD detection to enhance the methodological framework of uncertainty estimation. Our empirical evaluation demonstrates that epistemic uncertainty estimation is effective in identifying instances where model predictions are unreliable and may require an expert review. Notably, our approach achieves an AUC-ROC of 0.95 for OOD detection, with a specificity of 0.95 and a sensitivity of 0.92 for implant cases, underscoring its efficacy. This study addresses significant gaps in the current research landscape, such as the lack of ground truth for uncertainty estimation and limited empirical evaluations. Additionally, it provides a clinically relevant application of epistemic uncertainty estimation in an FDA-approved and widely used clinical solution for OAR segmentation from Varian, a Siemens Healthineers company, highlighting its practical benefits.", "title":"Towards Integrating Epistemic Uncertainty Estimation into the Radiotherapy Workflow", "authors":[ "Teichmann, Marvin Tom", "Datar, Manasi", "Kratzke, Lisa", "Vega, Fernando", "Ghesu, Florin C." ], "id":"Conference", "arxiv_id":"2409.18628", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":13 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2916_paper.pdf", "bibtext":"@InProceedings{ Maa_CoReEcho_MICCAI2024,\n author = { Maani, Fadillah Adamsyah and Saeed, Numan and Matsun, Aleksandr and Yaqub, Mohammad },\n title = { { CoReEcho: Continuous Representation Learning for 2D+time Echocardiography Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning (DL) models have been advancing automatic medical image analysis on various modalities, including echocardiography, by offering a comprehensive end-to-end training pipeline. This approach enables DL models to regress ejection fraction (EF) directly from 2D+time echocardiograms, resulting in superior performance. 
However, the end-to-end training pipeline makes the learned representations less explainable. The representations may also fail to capture the continuous relation among echocardiogram clips, indicating the existence of spurious correlations, which can negatively affect the generalization. To mitigate this issue, we propose CoReEcho, a novel training framework emphasizing continuous representations tailored for direct EF regression. Our extensive experiments demonstrate that CoReEcho: 1) outperforms the current state-of-the-art (SOTA) on the largest echocardiography dataset (EchoNet-Dynamic) with an MAE of 3.90 and an R2 of 82.44, and 2) provides robust and generalizable features that transfer more effectively to related downstream tasks. The code is publicly available at https:\/\/github.com\/BioMedIA-MBZUAI\/CoReEcho.", "title":"CoReEcho: Continuous Representation Learning for 2D+time Echocardiography Analysis", "authors":[ "Maani, Fadillah Adamsyah", "Saeed, Numan", "Matsun, Aleksandr", "Yaqub, Mohammad" ], "id":"Conference", "arxiv_id":"2403.10164", "GitHub":[ "https:\/\/github.com\/BioMedIA-MBZUAI\/CoReEcho" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":14 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4161_paper.pdf", "bibtext":"@InProceedings{ Ina_FewShot_MICCAI2024,\n author = { Inayat, Sumayya and Dilawar, Nimra and Sultani, Waqas and Ali, Mohsen },\n title = { { Few-Shot Domain Adaptive Object Detection for Microscopic Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Currently, unsupervised domain adaptive strategies proposed to overcome domain shift are handicapped by the requirement of a large amount of target data. On the other hand, medical imaging problems and datasets are often characterized not only by the scarcity of labeled and unlabeled data but also by class imbalance. Few-shot domain adaptive object detection (FSDAOD) addresses the challenge of adapting object detectors to target domains with limited labeled data. However, existing FSDAOD works struggle with randomly selected target domain images which might not represent the target distribution, resulting in overfitting and poor generalization. We propose a novel FSDAOD strategy for microscopic imaging to tackle high class imbalance and localization errors due to foreground-background similarity. Our contributions include: a domain adaptive class balancing strategy for the few-shot scenario and label-dependent cross-domain feature alignment. Specifically, multi-layer instance-level inter- and intra-domain feature alignment is performed by enhancing similarity between the instances of classes regardless of the domain and increasing dissimilarity between instances of different classes. In order to retain the features necessary for localizing and detecting minute texture variations in microscopic objects across the domain, the classification loss was applied at the feature map before the detection head. 
Extensive experimental results with competitive baselines indicate the effectiveness of our proposed approach, achieving state-of-the-art results on two public microscopic datasets, M5 [12] and Raabin-WBC [10]. Our method outperformed prior approaches on both datasets, increasing average mAP@50 by 8.3 points and 14.6 points, respectively. The project page is available here.", "title":"Few-Shot Domain Adaptive Object Detection for Microscopic Images", "authors":[ "Inayat, Sumayya", "Dilawar, Nimra", "Sultani, Waqas", "Ali, Mohsen" ], "id":"Conference", "arxiv_id":"2407.07633", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":15 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4098_paper.pdf", "bibtext":"@InProceedings{ Zha_ANew_MICCAI2024,\n author = { Zhang, Jiansong and Wu, Shengnan and Liu, Peizhong and Shen, Linlin },\n title = { { A New Dataset and Baseline Model for Rectal Cancer Risk Assessment in Endoscopic Ultrasound Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Early diagnosis of rectal cancer is essential to improve patient survival. Existing diagnostic methods mainly rely on complex MRI as well as pathology-level co-diagnosis. In contrast, in this paper, we collect and annotate for the first time a rectal cancer ultrasound endoscopy video dataset containing 207 patients for rectal cancer video risk assessment. Additionally, we introduce the Rectal Cancer Video Risk Assessment Network (RCVA-Net), a temporal logic-based framework designed to tackle the classification of rectal cancer ultrasound endoscopy videos. In RCVA-Net, we propose a novel adjacent frames fusion module that effectively integrates the temporal local features from the original video with the global features of the sampled video frames. The intra-video fusion module is employed to capture and learn the temporal dynamics between neighbouring video frames, enhancing the network\u2019s ability to discern subtle nuances in video sequences. Furthermore, we enhance the classification of rectal cancer by randomly incorporating video-level features extracted from the original videos, thereby significantly boosting the performance of rectal cancer classification using ultrasound endoscopic videos. Experimental results on our labelled dataset show that our RCVA-Net can serve as a scalable baseline model with leading performance. 
The code of this paper can be accessed at https:\/\/github.com\/JsongZhang\/RCVA-Net.", "title":"A New Dataset and Baseline Model for Rectal Cancer Risk Assessment in Endoscopic Ultrasound Videos", "authors":[ "Zhang, Jiansong", "Wu, Shengnan", "Liu, Peizhong", "Shen, Linlin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":16 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3762_paper.pdf", "bibtext":"@InProceedings{ Ji_DeformMamba_MICCAI2024,\n author = { Ji, Zexin and Zou, Beiji and Kui, Xiaoyan and Vera, Pierre and Ruan, Su },\n title = { { Deform-Mamba Network for MRI Super-Resolution } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper, we propose a new architecture, called Deform-Mamba, for MR image super-resolution. Unlike conventional CNN or Transformer-based super-resolution approaches which encounter challenges related to the local receptive field or heavy computational cost, our approach aims to effectively explore the local and global information of images. Specifically, we develop a Deform-Mamba encoder which is composed of two branches, modulated deform block and vision Mamba block. We also design a multi-view context module in the bottleneck layer to explore the multi-view contextual content. Thanks to the extracted features of the encoder, which include content-adaptive local and efficient global information, the vision Mamba decoder finally generates high-quality MR images. Moreover, we introduce a contrastive edge loss to promote the reconstruction of edge and contrast related content. Quantitative and qualitative experimental results indicate that our approach achieves competitive performance on the IXI and fastMRI datasets.", "title":"Deform-Mamba Network for MRI Super-Resolution", "authors":[ "Ji, Zexin", "Zou, Beiji", "Kui, Xiaoyan", "Vera, Pierre", "Ruan, Su" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":17 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0814_paper.pdf", "bibtext":"@InProceedings{ Wan_Dynamic_MICCAI2024,\n author = { Wang, Ziyue and Zhang, Ye and Wang, Yifeng and Cai, Linghan and Zhang, Yongbing },\n title = { { Dynamic Pseudo Label Optimization in Point-Supervised Nuclei Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning has achieved impressive results in nuclei segmentation, but the massive requirement for pixel-wise labels remains a significant challenge. To alleviate the annotation burden, existing methods generate pseudo masks for model training using point labels. 
However, the generated masks are inevitably different from the ground truth, and these dissimilarities are not handled reasonably during the network training, resulting in the subpar performance of the segmentation model. To tackle this issue, we propose a framework named DoNuSeg, enabling Dynamic pseudo label Optimization in point-supervised Nuclei Segmentation. Specifically, DoNuSeg takes advantage of class activation maps (CAMs) to adaptively capture regions with semantics similar to annotated points. To leverage semantic diversity in the hierarchical feature levels, we design a dynamic selection module to choose the optimal one among CAMs from different encoder blocks as pseudo masks. Meanwhile, a CAM-guided contrastive module is proposed to further enhance the accuracy of pseudo masks. In addition to exploiting the semantic information provided by CAMs, we consider location priors inherent to point labels, developing a task-decoupled structure for effectively differentiating nuclei. Extensive experiments demonstrate that DoNuSeg outperforms state-of-the-art point-supervised methods.", "title":"Dynamic Pseudo Label Optimization in Point-Supervised Nuclei Segmentation", "authors":[ "Wang, Ziyue", "Zhang, Ye", "Wang, Yifeng", "Cai, Linghan", "Zhang, Yongbing" ], "id":"Conference", "arxiv_id":"2406.16427", "GitHub":[ "https:\/\/github.com\/shinning0821\/MICCAI24-DoNuSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":18 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3296_paper.pdf", "bibtext":"@InProceedings{ Cui_Multilevel_MICCAI2024,\n author = { Cui, Xiaoxiao and Jiang, Shanzhi and Sun, Baolin and Li, Yiran and Cao, Yankun and Li, Zhen and Lv, Chaoyang and Liu, Zhi and Cui, Lizhen and Li, Shuo },\n title = { { Multilevel Causality Learning for Multi-label Gastric Atrophy Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"No studies have formulated endoscopic grading (EG) of gastric atrophy (GA) as a multi-label classification (MLC) problem, which requires the simultaneous detection of GA and its gastric sites during an endoscopic examination. Accurate EG of GA is crucial for assessing the progression of early gastric cancer. However, the strong visual interference in endoscopic images is caused by various inter-image differences and subtle intra-image differences, leading to confounding contexts and hindering the causalities between class-aware features (CAFs) and multi-label predictions. We propose a multilevel causality learning approach for multi-label gastric atrophy diagnosis for the first time, to learn robust causal CAFs by de-confounding multilevel confounders. Our multilevel causal model is built based on a transformer to construct a multilevel confounder set and implement a progressive causal intervention (PCI) on it. Specifically, the confounder set is constructed by a dual token path sampling module that leverages multiple class tokens and different hidden states of patch tokens to stratify various visual interference. PCI involves attention-based sample-level re-weighting and uncertainty-guided logit-level modulation. 
Comparative experiments on an endoscopic dataset demonstrate the significant improvement of our model over competing methods such as IDA (0.95% on OP and 0.65% on mAP) and TS-Former (1.11% on OP and 1.05% on mAP).", "title":"Multilevel Causality Learning for Multi-label Gastric Atrophy Diagnosis", "authors":[ "Cui, Xiaoxiao", "Jiang, Shanzhi", "Sun, Baolin", "Li, Yiran", "Cao, Yankun", "Li, Zhen", "Lv, Chaoyang", "Liu, Zhi", "Cui, Lizhen", "Li, Shuo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/rabbittsui\/Multilevel-Causal" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":19 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2700_paper.pdf", "bibtext":"@InProceedings{ Chi_LowShot_MICCAI2024,\n author = { Chikontwe, Philip and Kang, Myeongkyun and Luna, Miguel and Nam, Siwoo and Park, Sang Hyun },\n title = { { Low-Shot Prompt Tuning for Multiple Instance Learning based Histology Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"In recent years, prompting pre-trained visual-language (VL) models has shown excellent generalization to various downstream tasks in both natural and medical images. However, VL models are sensitive to the choice of input text prompts, requiring careful selection of templates. Moreover, prompt tuning in the weakly supervised\/multiple-instance (MIL) setting is fairly under-explored, especially in the field of computational pathology. In this work, we present a novel prompt tuning framework leveraging frozen VL encoders with (i) residual visual feature adaptation, and (ii) text-based context prompt optimization for whole slide image (WSI) level tasks, i.e., classification. In contrast with existing approaches using variants of attention-based instance pooling for slide-level representations, we propose synergistic prompt-based pooling of multiple instances as the weighted sum of learnable-context and slide features. By leveraging the mean learned-prompt vectors and pooled slide features, our design facilitates different slide-level tasks. 
Extensive experiments on public WSI benchmark datasets reveal significant gains over existing prompting methods, including standard baseline multiple instance learners.", "title":"Low-Shot Prompt Tuning for Multiple Instance Learning based Histology Classification", "authors":[ "Chikontwe, Philip", "Kang, Myeongkyun", "Luna, Miguel", "Nam, Siwoo", "Park, Sang Hyun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":20 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3783_paper.pdf", "bibtext":"@InProceedings{ Leg_Eddeep_MICCAI2024,\n author = { Legouhy, Antoine and Callaghan, Ross and Stee, Whitney and Peigneux, Philippe and Azadbakht, Hojjat and Zhang, Hui },\n title = { { Eddeep: Fast eddy-current distortion correction for diffusion MRI with deep learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Modern diffusion MRI sequences commonly acquire a large number of volumes with diffusion sensitization gradients of differing strengths or directions. Such sequences rely on echo-planar imaging (EPI) to achieve reasonable scan duration. However, EPI is vulnerable to off-resonance effects, leading to tissue susceptibility and eddy-current induced distortions. The latter is particularly problematic because it causes misalignment between volumes, disrupting downstream modelling and analysis. The essential correction of eddy distortions is typically done post-acquisition, with image registration. However, this is non-trivial because correspondence between volumes can be severely disrupted due to volume-specific signal attenuations induced by varying directions and strengths of the applied gradients. This challenge has been successfully addressed by the popular FSL Eddy tool but at considerable computational cost. We propose an alternative approach, leveraging recent advances in image processing enabled by deep learning (DL). It consists of two convolutional neural networks: 1) An image translator to restore correspondence between images; 2) A registration model to align the translated images. Results demonstrate comparable distortion estimates to FSL Eddy, while requiring only modest training sample sizes. This work, to the best of our knowledge, is the first to tackle this problem with deep learning. 
Together with recently developed DL-based susceptibility correction techniques, they pave the way for real-time preprocessing of diffusion MRI, facilitating its wider uptake in the clinic.", "title":"Eddeep: Fast eddy-current distortion correction for diffusion MRI with deep learning", "authors":[ "Legouhy, Antoine", "Callaghan, Ross", "Stee, Whitney", "Peigneux, Philippe", "Azadbakht, Hojjat", "Zhang, Hui" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "github.com\/CIG-UCL\/eddeep" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":21 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0077_paper.pdf", "bibtext":"@InProceedings{ Bie_XCoOp_MICCAI2024,\n author = { Bie, Yequan and Luo, Luyang and Chen, Zhixuan and Chen, Hao },\n title = { { XCoOp: Explainable Prompt Learning for Computer-Aided Diagnosis via Concept-guided Context Optimization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Utilizing potent representations of the large vision-language models (VLMs) to accomplish various downstream tasks has attracted increasing attention. Within this research field, soft prompt learning has become a representative approach for efficiently adapting VLMs such as CLIP, to tasks like image classification. However, most existing prompt learning methods learn text tokens that are unexplainable, which cannot satisfy the stringent interpretability requirements of Explainable Artificial Intelligence (XAI) in high-stakes scenarios like healthcare. To address this issue, we propose a novel explainable prompt learning framework that leverages medical knowledge by aligning the semantics between images, learnable prompts, and clinical concept-driven prompts at multiple granularities. Moreover, our framework addresses the lack of valuable concept annotations by eliciting knowledge from large language models and offers both visual and textual explanations for the prompts. 
Extensive experiments and explainability analyses conducted on various datasets, with and without concept labels, demonstrate that our method simultaneously achieves superior diagnostic performance, flexibility, and interpretability, shedding light on the effectiveness of foundation models in facilitating XAI.", "title":"XCoOp: Explainable Prompt Learning for Computer-Aided Diagnosis via Concept-guided Context Optimization", "authors":[ "Bie, Yequan", "Luo, Luyang", "Chen, Zhixuan", "Chen, Hao" ], "id":"Conference", "arxiv_id":"2403.09410", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":22 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1738_paper.pdf", "bibtext":"@InProceedings{ Tan_Clinicalgrade_MICCAI2024,\n author = { Tan, Jing Wei and Kim, SeungKyu and Kim, Eunsu and Lee, Sung Hak and Ahn, Sangjeong and Jeong, Won-Ki },\n title = { { Clinical-grade Multi-Organ Pathology Report Generation for Multi-scale Whole Slide Images via a Semantically Guided Medical Text Foundation Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Vision language models (VLM) have achieved success in both natural language comprehension and image recognition tasks. However, their use in pathology report generation for whole slide images (WSIs) is still limited due to the huge size of multi-scale WSIs and the high cost of WSI annotation. Moreover, in most of the existing research on pathology report generation, sufficient validation regarding clinical efficacy has not been conducted. Herein, we propose a novel Patient-level Multi-organ Pathology Report Generation (PMPRG) model, which utilizes the multi-scale WSI features from our proposed MR-ViT model and their real pathology reports to guide VLM training for accurate pathology report generation. The model then automatically generates a report based on the provided key features-attended regional features. We assessed our model using a WSI dataset consisting of multiple organs, including the colon and kidney. Our model achieved a METEOR score of 0.68, demonstrating the effectiveness of our approach. This model allows pathologists to efficiently generate pathology reports for patients, regardless of the number of WSIs involved.", "title":"Clinical-grade Multi-Organ Pathology Report Generation for Multi-scale Whole Slide Images via a Semantically Guided Medical Text Foundation Model", "authors":[ "Tan, Jing Wei", "Kim, SeungKyu", "Kim, Eunsu", "Lee, Sung Hak", "Ahn, Sangjeong", "Jeong, Won-Ki" ], "id":"Conference", "arxiv_id":"2409.15574", "GitHub":[ "https:\/\/github.com\/hvcl\/Clinical-grade-Pathology-Report-Generation\/tree\/main" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":23 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1524_paper.pdf", "bibtext":"@InProceedings{ Liu_SemanticsAware_MICCAI2024,\n author = { Liu, Kechun and Wu, Wenjun and Elmore, Joann G. 
and Shapiro, Linda G. },\n title = { { Semantics-Aware Attention Guidance for Diagnosing Whole Slide Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate cancer diagnosis remains a critical challenge in digital pathology, largely due to the gigapixel size and complex spatial relationships present in whole slide images. Traditional multiple instance learning (MIL) methods often struggle with these intricacies, especially in preserving the necessary context for accurate diagnosis. In response, we introduce a novel framework named Semantics-Aware Attention Guidance (SAG), which includes 1) a technique for converting diagnostically relevant entities into attention signals, and 2) a flexible attention loss that efficiently integrates various semantically significant information, such as tissue anatomy and cancerous regions. Our experiments on two distinct cancer datasets demonstrate consistent improvements in accuracy, precision, and recall with two state-of-the-art baseline models. Qualitative analysis further reveals that the incorporation of heuristic guidance enables the model to focus on regions critical for diagnosis. SAG is not only effective for the models discussed here, but its adaptability extends to any attention-based diagnostic model. This opens up exciting possibilities for further improving the accuracy and efficiency of cancer diagnostics.", "title":"Semantics-Aware Attention Guidance for Diagnosing Whole Slide Images", "authors":[ "Liu, Kechun", "Wu, Wenjun", "Elmore, Joann G.", "Shapiro, Linda G." ], "id":"Conference", "arxiv_id":"2404.10894", "GitHub":[ "https:\/\/github.com\/kechunl\/SAG" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":24 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1877_paper.pdf", "bibtext":"@InProceedings{ Di_Interpretable_MICCAI2024,\n author = { Di Folco, Maxime and Bercea, Cosmin I. and Chan, Emily and Schnabel, Julia A. },\n title = { { Interpretable Representation Learning of Cardiac MRI via Attribute Regularization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Interpretability is essential in medical imaging to ensure that clinicians can comprehend and trust artificial intelligence models. Several approaches have been recently considered to encode attributes in the latent space to enhance its interpretability. Notably, attribute regularization aims to encode a set of attributes along the dimensions of a latent representation. However, this approach is based on Variational AutoEncoder and suffers from blurry reconstruction. In this paper, we propose an Attributed-regularized Soft Introspective Variational Autoencoder that combines attribute regularization of the latent space within the framework of an adversarially trained variational autoencoder. 
We demonstrate on short-axis cardiac Magnetic Resonance images of the UK Biobank the ability of the proposed method to address blurry reconstruction issues of variational autoencoder methods while preserving the latent space interpretability.", "title":"Interpretable Representation Learning of Cardiac MRI via Attribute Regularization", "authors":[ "Di Folco, Maxime", "Bercea, Cosmin I.", "Chan, Emily", "Schnabel, Julia A." ], "id":"Conference", "arxiv_id":"2406.08282", "GitHub":[ "https:\/\/github.com\/compai-lab\/2024-miccai-di-folco" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":25 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0714_paper.pdf", "bibtext":"@InProceedings{ Xin_Crossconditioned_MICCAI2024,\n author = { Xing, Zhaohu and Yang, Sicheng and Chen, Sixiang and Ye, Tian and Yang, Yijun and Qin, Jing and Zhu, Lei },\n title = { { Cross-conditioned Diffusion Model for Medical Image to Image Translation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multi-modal magnetic resonance imaging (MRI) provides rich, complementary information for analyzing diseases. \nHowever, the practical challenges of acquiring multiple MRI modalities, such as cost, scan time, and safety considerations, often result in incomplete datasets. This affects both the quality of diagnosis and the performance of deep learning models trained on such data. \nRecent advancements in generative adversarial networks (GANs) and denoising diffusion models have shown promise in natural and medical image-to-image translation tasks. However, the complexity of training GANs and the computational expense associated with diffusion models hinder their development and application in this task. \nTo address these issues, we introduce a Cross-conditioned Diffusion Model (CDM) for medical image-to-image translation. \nThe core idea of CDM is to use the distribution of target modalities as guidance to improve synthesis quality, while achieving higher generation efficiency compared to conventional diffusion models.\nFirst, we propose a Modality-specific Representation Model (MRM) to model the distribution of target modalities. Then, we design a Modality-decoupled Diffusion Network (MDN) to efficiently and effectively learn the distribution from MRM. Finally, a Cross-conditioned UNet (C-UNet) with a Condition Embedding module is designed to synthesize the target modalities with the source modalities as input and the target distribution for guidance. 
Extensive experiments conducted on the BraTS2023 and UPenn-GBM benchmark datasets demonstrate the superiority of our method.", "title":"Cross-conditioned Diffusion Model for Medical Image to Image Translation", "authors":[ "Xing, Zhaohu", "Yang, Sicheng", "Chen, Sixiang", "Ye, Tian", "Yang, Yijun", "Qin, Jing", "Zhu, Lei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":26 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3332_paper.pdf", "bibtext":"@InProceedings{ Li_Exploiting_MICCAI2024,\n author = { Li, Yueheng and Guan, Xianchao and Wang, Yifeng and Zhang, Yongbing },\n title = { { Exploiting Supervision Information in Weakly Paired Images for IHC Virtual Staining } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Immunohistochemical (IHC) staining plays a pivotal role in the evaluation of numerous diseases. However, the standard IHC staining process involves a series of time-consuming and labor-intensive steps, which severely hinders its application in histopathology. With the rapid advancement of deep learning techniques, virtual staining has promising potential to address this issue. But it has long been challenging to determine how to effectively provide supervision information for networks by utilizing consecutive tissue slices. To this end, we propose a weakly supervised pathological consistency constraint acting on multiple layers of GAN. Due to variations of receptive fields in different layers of the network, weakly paired consecutive slices have different degrees of alignment. Thus we allocate adaptive weights to different layers in order to dynamically adjust the supervision strengths of the pathological consistency constraint. Additionally, as an effective deep generative model, GAN can generate high-fidelity images, but it suffers from the issue of discriminator failure. To tackle this issue, a discriminator contrastive regularization method is proposed. It compels the discriminator to contrast the differences between generated images and real images from consecutive layers, thereby enhancing its capability to distinguish virtual images. The experimental results demonstrate that our method generates IHC images from H&E images robustly and identifies cancer regions accurately. 
Compared to previous methods, our method achieves superior results.", "title":"Exploiting Supervision Information in Weakly Paired Images for IHC Virtual Staining", "authors":[ "Li, Yueheng", "Guan, Xianchao", "Wang, Yifeng", "Zhang, Yongbing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":27 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0281_paper.pdf", "bibtext":"@InProceedings{ Guo_Trimodal_MICCAI2024,\n author = { Guo, Diandian and Lin, Manxi and Pei, Jialun and Tang, He and Jin, Yueming and Heng, Pheng-Ann },\n title = { { Tri-modal Confluence with Temporal Dynamics for Scene Graph Generation in Operating Rooms } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"A comprehensive understanding of surgical scenes allows for monitoring of the surgical process, reducing the occurrence of accidents and enhancing efficiency for medical professionals. Semantic modeling within operating rooms, as a scene graph generation (SGG) task, is challenging since it involves consecutive recognition of subtle surgical actions over prolonged periods. To address this challenge, we propose a Tri-modal (i.e., images, point clouds, and language) confluence with Temporal dynamics framework, termed TriTemp-OR. Diverging from previous approaches that integrated temporal information via memory graphs, our method embraces two advantages: 1) we directly exploit bi-modal temporal information from the video streaming for hierarchical feature interaction, and 2) the prior knowledge from Large Language Models (LLMs) is embedded to alleviate the class-imbalance problem in the operating theatre. Specifically, our model performs temporal interactions across 2D frames and 3D point clouds, including a scale-adaptive multi-view temporal interaction (ViewTemp) and a geometric-temporal point aggregation (PointTemp). Furthermore, we transfer knowledge from the biomedical LLM, LLaVA-Med, to deepen the comprehension of intraoperative relations. The proposed TriTemp-OR enables the aggregation of tri-modal features through relation-aware unification to predict relations to generate scene graphs. Experimental results on the 4D-OR benchmark demonstrate the superior performance of our model for long-term OR streaming. Codes are available at https:\/\/github.com\/RascalGdd\/TriTemp-OR.", "title":"Tri-modal Confluence with Temporal Dynamics for Scene Graph Generation in Operating Rooms", "authors":[ "Guo, Diandian", "Lin, Manxi", "Pei, Jialun", "Tang, He", "Jin, Yueming", "Heng, Pheng-Ann" ], "id":"Conference", "arxiv_id":"2404.09231", "GitHub":[ "https:\/\/github.com\/RascalGdd\/TriTemp-OR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":28 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0668_paper.pdf", "bibtext":"@InProceedings{ Par_BlackBox_MICCAI2024,\n author = { Paranjape, Jay N. 
and Sikder, Shameema and Vedula, S. Swaroop and Patel, Vishal M. },\n title = { { Black-Box Adaptation for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"In recent years, various large foundation models have been proposed for image segmentation. These models are often trained on large amounts of data corresponding to general computer vision tasks. Hence, these models do not perform well on medical data. There have been some attempts in the literature to perform parameter-efficient finetuning of such foundation models for medical image segmentation. However, these approaches assume that all the parameters of the model are available for adaptation. But, in many cases, these models are released as APIs or Black-Boxes, with no or limited access to the model parameters and data. In addition, finetuning methods also require a significant amount of compute, which may not be available for the downstream task. At the same time, medical data can\u2019t be shared with third-party agents for finetuning due to privacy reasons. To tackle these challenges, we pioneer a Black-Box adaptation technique for prompted medical image segmentation, called BAPS. BAPS has two components - (i) An Image-Prompt decoder (IP decoder) module that generates visual prompts given an image and a prompt, and (ii) A Zero Order Optimization (ZOO) Method, called SPSA-GC that is used to update the IP decoder without the need for backpropagating through the foundation model. Thus, our method does not require any knowledge about the foundation model\u2019s weights or gradients. We test BAPS on four different modalities and show that our method can improve the original model\u2019s performance by around 4%. The code is available at https:\/\/github.com\/JayParanjape\/Blackbox.", "title":"Black-Box Adaptation for Medical Image Segmentation", "authors":[ "Paranjape, Jay N.", "Sikder, Shameema", "Vedula, S. Swaroop", "Patel, Vishal M." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/JayParanjape\/Blackbox" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":29 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2831_paper.pdf", "bibtext":"@InProceedings{ Zho_Efficient_MICCAI2024,\n author = { Zhou, Lingyu and Yi, Zhang and Zhou, Kai and Xu, Xiuyuan },\n title = { { Efficient and Gender-adaptive Graph Vision Mamba for Pediatric Bone Age Assessment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Bone age assessment (BAA) is crucial for evaluating the skeletal maturity of children in pediatric clinics. The decline in assessment accuracy is attributed to the existence of inter-gender disparity. Current automatic methods bridge this gap by relying on bone regions of interest and gender, resulting in high annotation costs. Meanwhile, the models still grapple with efficiency bottleneck for lightweight deployment. 
To address these challenges, this study presents Gender-adaptive Graph Vision Mamba (GGVMamba) framework with only raw X-ray images. Concretely, a region augmentation process, called directed scan module, is proposed to integrate local context from various directions of bone X-ray images. Then we construct a novel graph Mamba encoder with linear complexity, fostering robust modelling for both within and among region features. Moreover, a gender adaptive strategy is proposed to improve gender consistency by dynamically selecting gender-specific graph structures. Experiments demonstrate that GGVMamba obtains state-of-the-art results with MAE of 3.82, 4.91, and 4.14 on RSNA, RHPE, and DHA, respectively. Notably, GGVMamba shows exceptional gender consistency and optimal efficiency with minimal GPU load. The code is available at https:\/\/github.com\/SCU-zly\/GGVMamba.", "title":"Efficient and Gender-adaptive Graph Vision Mamba for Pediatric Bone Age Assessment", "authors":[ "Zhou, Lingyu", "Yi, Zhang", "Zhou, Kai", "Xu, Xiuyuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/SCU-zly\/GGVMamba" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":30 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1909_paper.pdf", "bibtext":"@InProceedings{ Gui_CAVM_MICCAI2024,\n author = { Gui, Lujun and Ye, Chuyang and Yan, Tianyi },\n title = { { CAVM: Conditional Autoregressive Vision Model for Contrast-Enhanced Brain Tumor MRI Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Contrast-enhanced magnetic resonance imaging (MRI) is pivotal in the pipeline of brain tumor segmentation and analysis. Gadolinium-based contrast agents, as the most commonly used contrast agents, are expensive and may have potential side effects, and it is desired to obtain contrast-enhanced brain tumor MRI scans without the actual use of contrast agents. Deep learning methods have been applied to synthesize virtual contrast-enhanced MRI scans from non-contrast images. However, as this synthesis problem is inherently ill-posed, these methods fall short in producing high-quality results. In this work, we propose Conditional Autoregressive Vision Model (CAVM) for improving the synthesis of contrast-enhanced brain tumor MRI. As the enhancement of image intensity grows with a higher dose of contrast agents, we assume that it is less challenging to synthesize a virtual image with a lower dose, where the difference between the contrast-enhanced and non-contrast images is smaller. Thus, CAVM gradually increases the contrast agent dosage and produces higher-dose images based on previous lower-dose ones until the final desired dose is achieved. Inspired by the resemblance between the gradual dose increase and the Chain-of-Thought approach in natural language processing, CAVM uses an autoregressive strategy with a decomposition tokenizer and a decoder. Specifically, the tokenizer is applied to obtain a more compact image representation for computational efficiency, and it decomposes the image into dose-variant and dose-invariant tokens. 
Then, a masked self-attention mechanism is developed for autoregression that gradually increases the dose of the virtual image based on the dose-variant tokens. Finally, the updated dose-variant tokens corresponding to the desired dose are decoded together with dose-invariant tokens to produce the final contrast-enhanced MRI. CAVM was validated on the BraSyn-2023 dataset with brain tumor MRI, where it outperforms state-of-the-art methods.", "title":"CAVM: Conditional Autoregressive Vision Model for Contrast-Enhanced Brain Tumor MRI Synthesis", "authors":[ "Gui, Lujun", "Ye, Chuyang", "Yan, Tianyi" ], "id":"Conference", "arxiv_id":"2406.16074", "GitHub":[ "https:\/\/github.com\/Luc4Gui\/CAVM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":31 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3334_paper.pdf", "bibtext":"@InProceedings{ Cha_Forecasting_MICCAI2024,\n author = { Chakravarty, Arunava and Emre, Taha and Lachinov, Dmitrii and Rivail, Antoine and Scholl, Hendrik P. N. and Fritsche, Lars and Sivaprasad, Sobha and Rueckert, Daniel and Lotery, Andrew and Schmidt-Erfurth, Ursula and Bogunovi\u0107, Hrvoje },\n title = { { Forecasting Disease Progression with Parallel Hyperplanes in Longitudinal Retinal OCT } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Predicting future disease progression risk from medical images is challenging due to patient heterogeneity, and subtle or unknown imaging biomarkers. Moreover, deep learning (DL) methods for survival analysis are susceptible to image domain shifts across scanners. We tackle these issues in the task of predicting late dry Age-related Macular Degeneration (dAMD) onset from retinal OCT scans. We propose a novel DL method for survival prediction to jointly predict from the current scan a risk score, inversely related to time-to-conversion, and the probability of conversion within a time interval t. It uses a family of parallel hyperplanes generated by parameterizing the bias term as a function of t. In addition, we develop unsupervised losses based on intra-subject image pairs to ensure that risk scores increase over time and that future conversion predictions are consistent with AMD stage prediction using actual scans of future visits. Such losses enable data efficient fine-tuning of the trained model on new unlabeled datasets acquired with a different scanner. Extensive evaluation on two large datasets acquired with different scanners resulted in a mean AUROCs of 0.82 for Dataset-1 and 0.83 for Dataset-2, across prediction intervals of 6,12 and 24 months.", "title":"Forecasting Disease Progression with Parallel Hyperplanes in Longitudinal Retinal OCT", "authors":[ "Chakravarty, Arunava", "Emre, Taha", "Lachinov, Dmitrii", "Rivail, Antoine", "Scholl, Hendrik P. 
N.", "Fritsche, Lars", "Sivaprasad, Sobha", "Rueckert, Daniel", "Lotery, Andrew", "Schmidt-Erfurth, Ursula", "Bogunovi\u0107, Hrvoje" ], "id":"Conference", "arxiv_id":"2409.20195", "GitHub":[ "https:\/\/github.com\/arunava555\/Forecast_parallel_hyperplanes" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":32 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2112_paper.pdf", "bibtext":"@InProceedings{ Nae_Trexplorer_MICCAI2024,\n author = { Naeem, Roman and Hagerman, David and Svensson, Lennart and Kahl, Fredrik },\n title = { { Trexplorer: Recurrent DETR for Topologically Correct Tree Centerline Tracking } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Tubular structures with tree topology such as blood vessels, lung airways, and more are abundant in human anatomy. Tracking these structures with correct topology is crucial for many downstream tasks that help in early detection of conditions such as vascular and pulmonary diseases. Current methods for centerline tracking suffer from predicting topologically incorrect centerlines and complex model pipelines. To mitigate these issues we propose Trexplorer, a recurrent DETR based model that tracks topologically correct centerlines of tubular tree objects in 3D volumes using a simple model pipeline. We demonstrate the model\u2019s performance on a publicly available synthetic vessel centerline dataset and show that our model outperforms the state-of-the-art on centerline topology and graph-related metrics, and performs well on detection metrics. The code is available at https:\/\/github.com\/RomStriker\/Trexplorer.", "title":"Trexplorer: Recurrent DETR for Topologically Correct Tree Centerline Tracking", "authors":[ "Naeem, Roman", "Hagerman, David", "Svensson, Lennart", "Kahl, Fredrik" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/RomStriker\/Trexplorer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":33 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2717_paper.pdf", "bibtext":"@InProceedings{ Che_FedEvi_MICCAI2024,\n author = { Chen, Jiayi and Ma, Benteng and Cui, Hengfei and Xia, Yong },\n title = { { FedEvi: Improving Federated Medical Image Segmentation via Evidential Weight Aggregation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Federated learning enables collaborative knowledge acquisition among clinical institutions while preserving data privacy. However, feature heterogeneity across institutions can compromise the global model\u2019s performance and generalization capability. 
Existing methods often adjust aggregation weights dynamically to improve the global model\u2019s generalization but rely heavily on the local models\u2019 performance or reliability, excluding an explicit measure of the generalization gap arising from deploying the global model across varied local datasets. To address this issue, we propose FedEvi, a method that adjusts the aggregation weights based on the generalization gap between the global model and each local dataset and the reliability of local models. We utilize a Dirichlet-based evidential model to disentangle the uncertainty representation of each local model and the global model into epistemic uncertainty and aleatoric uncertainty. Then, we quantify the global generalization gap using the epistemic uncertainty of the global model and assess the reliability of each local model using its aleatoric uncertainty. Afterward, we design aggregation weights using the global generalization gap and local reliability. Comprehensive experimentation reveals that FedEvi consistently surpasses 12 state-of-the-art methods across three real-world multi-center medical image segmentation tasks, demonstrating the effectiveness of FedEvi in bolstering the generalization capacity of the global model in heterogeneous federated scenarios. The code will be available at\nhttps:\/\/github.com\/JiayiChen815\/FedEvi.", "title":"FedEvi: Improving Federated Medical Image Segmentation via Evidential Weight Aggregation", "authors":[ "Chen, Jiayi", "Ma, Benteng", "Cui, Hengfei", "Xia, Yong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/JiayiChen815\/FedEvi" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":34 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1439_paper.pdf", "bibtext":"@InProceedings{ Tei_CTbased_MICCAI2024,\n author = { Teimouri, Reihaneh and Kersten-Oertel, Marta and Xiao, Yiming },\n title = { { CT-based brain ventricle segmentation via diffusion Schro\u0308dinger Bridge without target domain ground truths } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Efficient and accurate brain ventricle segmentation from clinical CT scans is critical for emergency surgeries like ventriculostomy. With the challenges in poor soft tissue contrast and a scarcity of well-annotated databases for clinical brain CTs, we introduce a novel uncertainty-aware ventricle segmentation technique without the need of CT segmentation ground truths by leveraging diffusion-model-based domain adaptation. Specifically, our method employs the diffusion Schr\u00f6dinger Bridge and an attention recurrent residual U-Net to capitalize on unpaired CT and MRI scans to derive automatic CT segmentation from those of the MRIs, which are more accessible. Importantly, we propose an end-to-end, joint training framework of image translation and segmentation tasks, and demonstrate its benefit over training individual tasks separately. 
By comparing the proposed method against similar setups using two different GAN models for domain adaptation (CycleGAN and CUT), we also reveal the advantage of diffusion models towards improved segmentation and image translation quality. With a Dice score of 0.78\u00b10.27, our proposed method outperformed the compared methods, including SynSeg-Net, while providing intuitive uncertainty measures to further facilitate quality control of the automatic segmentation outcomes. The code is available at: https:\/\/github.com\/HealthX-Lab\/DiffusionSynCTSeg.", "title":"CT-based brain ventricle segmentation via diffusion Schro\u0308dinger Bridge without target domain ground truths", "authors":[ "Teimouri, Reihaneh", "Kersten-Oertel, Marta", "Xiao, Yiming" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/HealthX-Lab\/DiffusionSynCTSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":35 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3180_paper.pdf", "bibtext":"@InProceedings{ Kam_Is_MICCAI2024,\n author = { Kampen, Peter Johannes Tejlgaard and Christensen, Anders Nymark and Hannemose, Morten Rieger },\n title = { { Is this hard for you? Personalized human difficulty estimation for skin lesion diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Predicting the probability of human error is an important problem with applications ranging from optimizing learning environments to distributing cases among doctors in a clinic. In both of these instances, predicting the probability of error is equivalent to predicting the difficulty of the assignment, e.g., diagnosing a specific image of a skin lesion. However, the difficulty of a case is subjective since what is difficult for one person is not necessarily difficult for another. We present a novel approach for personalized estimation of human difficulty, using a transformer-based neural network that looks at previous cases and if the user answered these correctly. We demonstrate our method on doctors diagnosing skin lesions and on a language learning data set showing generalizability across domains. Our approach utilizes domain representations by first encoding each case using pre-trained neural networks and subsequently using these as tokens in a sequence modeling task. We significantly outperform all baselines, both for cases that are in the training set and for unseen cases. Additionally, we show that our method is robust towards the quality of the embeddings and how the performance increases as more answers from a user are available. Our findings suggest that this approach could pave the way for truly personalized learning experiences in medical diagnostics, enhancing the quality of patient care.", "title":"Is this hard for you? 
Personalized human difficulty estimation for skin lesion diagnosis", "authors":[ "Kampen, Peter Johannes Tejlgaard", "Christensen, Anders Nymark", "Hannemose, Morten Rieger" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":36 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1010_paper.pdf", "bibtext":"@InProceedings{ Din_CrossModality_MICCAI2024,\n author = { Ding, Zhengyao and Hu, Yujian and Li, Ziyu and Zhang, Hongkun and Wu, Fei and Xiang, Yilang and Li, Tian and Liu, Ziyi and Chu, Xuesen and Huang, Zhengxing },\n title = { { Cross-Modality Cardiac Insight Transfer: A Contrastive Learning Approach to Enrich ECG with CMR Features } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cardiovascular diseases are the leading cause of death worldwide, and accurate diagnostic tools are crucial for their early detection and treatment. Electrocardiograms (ECG) offer a non-invasive and widely accessible diagnostic method. Despite their convenience, they are limited in providing in-depth cardiovascular information. On the other hand, Cardiac Magnetic Resonance Imaging (CMR) can reveal detailed structural and functional heart information; however, it is costly and not widely accessible. This study aims to bridge this gap through a contrastive learning framework that deeply integrates ECG data with insights from CMR, allowing the extraction of cardiovascular information solely from ECG. We developed an innovative contrastive learning algorithm trained on a large-scale paired ECG and CMR dataset, enabling ECG data to map onto the feature space of CMR data. Experimental results demonstrate that our method significantly improves the accuracy of cardiovascular disease diagnosis using only ECG data. Furthermore, our approach enhances the correlation coefficient for predicting cardiac traits from ECG, revealing potential connections between ECG and CMR. This study not only proves the effectiveness of contrastive learning in cross-modal medical image analysis but also offers a low-cost, efficient way to leverage existing ECG equipment for a deeper understanding of cardiovascular health conditions. Our code is available at https:\/\/github.com\/Yukui-1999\/ECCL.", "title":"Cross-Modality Cardiac Insight Transfer: A Contrastive Learning Approach to Enrich ECG with CMR Features", "authors":[ "Ding, Zhengyao", "Hu, Yujian", "Li, Ziyu", "Zhang, Hongkun", "Wu, Fei", "Xiang, Yilang", "Li, Tian", "Liu, Ziyi", "Chu, Xuesen", "Huang, Zhengxing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Yukui-1999\/ECCL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":37 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3269_paper.pdf", "bibtext":"@InProceedings{ Esh_Representation_MICCAI2024,\n author = { Eshraghi Dehaghani, Mehrdad and Sabour, Amirhossein and Madu, Amarachi B. 
and Lourentzou, Ismini and Moradi, Mehdi },\n title = { { Representation Learning with a Transformer-Based Detection Model for Localized Chest X-Ray Disease and Progression Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image interpretation often encompasses diverse tasks, yet prevailing AI approaches predominantly favor end-to-end image-to-text models for automatic chest X-ray reading and analysis, often overlooking critical components of radiology reports. At the same time, employing separate models for related but distinct tasks leads to computational overhead and the inability to harness the benefits of shared data abstractions. In this work, we introduce a framework for chest X-Ray interpretation, utilizing a Transformer-based object detection model trained on abundant data for learning localized representations. Our model achieves a mean average precision of \u223c 94% in identifying semantically meaningful anatomical regions, facilitating downstream tasks, namely localized disease detection and localized progression monitoring. Our approach yields competitive results in localized disease detection, with an average ROC of 89.1% over 9 diseases. In addition, to the best of our knowledge, our work is the first to tackle localized disease progression monitoring, with the proposed model being able to track changes in specific regions of interest (RoIs) with an average accuracy \u223c 67% and average F1 score of \u223c 71%. Code is available at https:\/\/github.com\/McMasterAIHLab\/CheXDetector.", "title":"Representation Learning with a Transformer-Based Detection Model for Localized Chest X-Ray Disease and Progression Detection", "authors":[ "Eshraghi Dehaghani, Mehrdad", "Sabour, Amirhossein", "Madu, Amarachi B.", "Lourentzou, Ismini", "Moradi, Mehdi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/McMasterAIHLab\/CheXDetector" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":38 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1194_paper.pdf", "bibtext":"@InProceedings{ Che_BIMCVR_MICCAI2024,\n author = { Chen, Yinda and Liu, Che and Liu, Xiaoyu and Arcucci, Rossella and Xiong, Zhiwei },\n title = { { BIMCV-R: A Landmark Dataset for 3D CT Text-Image Retrieval } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"The burgeoning integration of 3D medical imaging into healthcare has led to a substantial increase in the workload of medical professionals. To assist clinicians in their diagnostic processes and alleviate their workload, the development of a robust system for retrieving similar case studies presents a viable solution.\nWhile the concept holds great promise, the field of 3D medical text-image retrieval is currently limited by the absence of robust evaluation benchmarks and curated datasets.
To remedy this, our study presents a groundbreaking dataset, {BIMCV-R}, which includes an extensive collection of 8,069 3D CT volumes, encompassing over 2 million slices, paired with their respective radiological reports.\nExpanding upon the foundational work of our dataset, we craft a retrieval strategy, MedFinder. This approach employs a dual-stream network architecture, harnessing the potential of large language models to advance the field of medical image retrieval beyond existing text-image retrieval solutions. It marks our preliminary step towards developing a system capable of facilitating text-to-image, image-to-text, and keyword-based retrieval tasks. Our project is available at \\url{https:\/\/huggingface.co\/datasets\/cyd0806\/BIMCV-R}.", "title":"BIMCV-R: A Landmark Dataset for 3D CT Text-Image Retrieval", "authors":[ "Chen, Yinda", "Liu, Che", "Liu, Xiaoyu", "Arcucci, Rossella", "Xiong, Zhiwei" ], "id":"Conference", "arxiv_id":"2403.15992", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.15992", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":5, "Models":[ ], "Datasets":[ "cyd0806\/BIMCV-R" ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ "cyd0806\/BIMCV-R" ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":39 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1151_paper.pdf", "bibtext":"@InProceedings{ Liu_MRScore_MICCAI2024,\n author = { Liu, Yunyi and Wang, Zhanyu and Li, Yingshu and Liang, Xinyu and Liu, Lingqiao and Wang, Lei and Zhou, Luping },\n title = { { MRScore: Evaluating Medical Report with LLM-based Reward System } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"We propose MRScore, an innovative automatic evaluation metric specifically tailored for the generation of radiology reports. Traditional (natural language generation) NLG metrics like BLEU are inadequate for accurately assessing reports, particularly those generated by Large Language Models (LLMs). Our experimental findings give systematic evidence of these inadequacies within this paper. To overcome this challenge, we have developed a unique framework intended to guide LLMs in evaluating radiology reports, which was created in collaboration with radiologists adhering to standard human report evaluation procedures. Using this as a prompt can ensure that the LLMs\u2019 output closely mirrors human analysis. We then used the data generated by LLMs to establish a human-labeled dataset by pairing them with accept and reject samples, subsequently training the MRScore model as the reward model with this dataset. MRScore has demonstrated a higher correlation with human judgments and superior performance in model selection when compared with traditional metrics. 
Our code is available on GitHub at: https:\/\/github.com\/yunyiliu\/MRScore.", "title":"MRScore: Evaluating Medical Report with LLM-based Reward System", "authors":[ "Liu, Yunyi", "Wang, Zhanyu", "Li, Yingshu", "Liang, Xinyu", "Liu, Lingqiao", "Wang, Lei", "Zhou, Luping" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/yunyiliu\/MRScore" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":40 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3127_paper.pdf", "bibtext":"@InProceedings{ Che_CausalCLIPSeg_MICCAI2024,\n author = { Chen, Yaxiong and Wei, Minghong and Zheng, Zixuan and Hu, Jingliang and Shi, Yilei and Xiong, Shengwu and Zhu, Xiao Xiang and Mou, Lichao },\n title = { { CausalCLIPSeg: Unlocking CLIP\u2019s Potential in Referring Medical Image Segmentation with Causal Intervention } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Referring medical image segmentation targets delineating lesions indicated by textual descriptions. Aligning visual and textual cues is challenging due to their distinct data properties. Inspired by large-scale pre-trained vision-language models, we propose CausalCLIPSeg, an end-to-end framework for referring medical image segmentation that leverages CLIP. Despite not being trained on medical data, we enforce CLIP\u2019s rich semantic space onto the medical domain by a tailored cross-modal decoding method to achieve text-to-pixel alignment. Furthermore, to mitigate confounding bias that may cause the model to learn spurious correlations instead of meaningful causal relationships, CausalCLIPSeg introduces a causal intervention module which self-annotates confounders and excavates causal features from inputs for segmentation judgments. We also devise an adversarial min-max game to optimize causal features while penalizing confounding ones. Extensive experiments demonstrate the state-of-the-art performance of our proposed method. 
Code is available at https:\/\/github.com\/WUTCM-Lab\/CausalCLIPSeg.", "title":"CausalCLIPSeg: Unlocking CLIP\u2019s Potential in Referring Medical Image Segmentation with Causal Intervention", "authors":[ "Chen, Yaxiong", "Wei, Minghong", "Zheng, Zixuan", "Hu, Jingliang", "Shi, Yilei", "Xiong, Shengwu", "Zhu, Xiao Xiang", "Mou, Lichao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/WUTCM-Lab\/CausalCLIPSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":41 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0373_paper.pdf", "bibtext":"@InProceedings{ Hay_Online_MICCAI2024,\n author = { Hayoz, Michel and Hahne, Christopher and Kurmann, Thomas and Allan, Max and Beldi, Guido and Candinas, Daniel and Ma\u0301rquez-Neila, Pablo and Sznitman, Raphael },\n title = { { Online 3D reconstruction and dense tracking in endoscopic videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"3D scene reconstruction from stereo endoscopic video data is crucial for advancing surgical interventions. In this work, we present an online framework for real-time, dense 3D scene reconstruction and tracking, aimed at enhancing surgical scene understanding and assisting interventions. Our method dynamically extends a canonical scene representation using Gaussian splatting, while modeling tissue deformations through a sparse set of control points. We introduce an efficient online fitting algorithm that optimizes the scene parameters, enabling consistent tracking and accurate reconstruction. Through experiments on the StereoMIS dataset, we demonstrate the effectiveness of our approach, outperforming state-of-the-art tracking methods and achieving comparable performance to offline reconstruction techniques. 
Our work enables various downstream applications thus contributing to advancing the capabilities of surgical assistance systems.", "title":"Online 3D reconstruction and dense tracking in endoscopic videos", "authors":[ "Hayoz, Michel", "Hahne, Christopher", "Kurmann, Thomas", "Allan, Max", "Beldi, Guido", "Candinas, Daniel", "Ma\u0301rquez-Neila, Pablo", "Sznitman, Raphael" ], "id":"Conference", "arxiv_id":"2409.06037", "GitHub":[ "https:\/\/github.com\/mhayoz\/online_endo_track" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":42 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2570_paper.pdf", "bibtext":"@InProceedings{ Che_Accelerated_MICCAI2024,\n author = { Chen, Qi and Xing, Xiaohan and Chen, Zhen and Xiong, Zhiwei },\n title = { { Accelerated Multi-Contrast MRI Reconstruction via Frequency and Spatial Mutual Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"To accelerate Magnetic Resonance (MR) imaging procedures, Multi-Contrast MR Reconstruction (MCMR) has become a prevalent trend that utilizes an easily obtainable modality as an auxiliary to support high-quality reconstruction of the target modality with under-sampled k-space measurements. The exploration of global dependency and complementary information across different modalities is essential for MCMR. However, existing methods either struggle to capture global dependency due to the limited receptive field or suffer from quadratic computational complexity. To tackle this dilemma, we propose a novel Frequency and Spatial Mutual Learning Network (FSMNet), which efficiently explores global dependencies across different modalities. Specifically, the features for each modality are extracted by the Frequency-Spatial Feature Extraction (FSFE) module, featuring a frequency branch and a spatial branch. Benefiting from the global property of the Fourier transform, the frequency branch can efficiently capture global dependency with an image-size receptive field, while the spatial branch can extract local features. To exploit complementary information from the auxiliary modality, we propose a Cross-Modal Selective fusion (CMS-fusion) module that selectively incorporate the frequency and spatial features from the auxiliary modality to enhance the corresponding branch of the target modality. To further integrate the enhanced global features from the frequency branch and the enhanced local features from the spatial branch, we develop a Frequency-Spatial fusion (FS-fusion) module, resulting in a comprehensive feature representation for the target modality. 
Extensive experiments on the BraTS and fastMRI datasets demonstrate that the proposed FSMNet achieves state-of-the-art performance for the MCMR task with different acceleration factors.", "title":"Accelerated Multi-Contrast MRI Reconstruction via Frequency and Spatial Mutual Learning", "authors":[ "Chen, Qi", "Xing, Xiaohan", "Chen, Zhen", "Xiong, Zhiwei" ], "id":"Conference", "arxiv_id":"2409.14113", "GitHub":[ "https:\/\/github.com\/qic999\/fsmnet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":43 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2372_paper.pdf", "bibtext":"@InProceedings{ Kon_MetaStain_MICCAI2024,\n author = { Konwer, Aishik and Prasanna, Prateek },\n title = { { MetaStain: Stain-generalizable Meta-learning for Cell Segmentation and Classification with Limited Exemplars } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning models excel when evaluated on test data that share similar attributes and\/or distribution with the training data. However, their ability to generalize may suffer when there are discrepancies in distributions between the training and testing data i.e. domain shift. In this work, we utilize meta-learning to introduce MetaStain, a stain-generalizable representation learning framework that performs cell segmentation and classification in histopathology images. Owing to the designed episodical meta-learning paradigm, MetaStain can adapt to unseen stains and\/or novel classes through finetuning even with limited annotated samples. We design a stain-aware triplet loss that clusters stain-agnostic class-specific features, as well as separates intra-stain features extracted from different classes. We also employ a consistency triplet loss to preserve the spatial correspondence between tissues under different stains. During test-time adaptation, a refined class weight generator module is optionally introduced if the unseen testing data also involves novel classes. 
MetaStain significantly outperforms state-of-the-art segmentation and classification methods on the multi-stain MIST dataset under various experimental settings.", "title":"MetaStain: Stain-generalizable Meta-learning for Cell Segmentation and Classification with Limited Exemplars", "authors":[ "Konwer, Aishik", "Prasanna, Prateek" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":44 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4020_paper.pdf", "bibtext":"@InProceedings{ Din_HRDecoder_MICCAI2024,\n author = { Ding, Ziyuan and Liang, Yixiong and Kan, Shichao and Liu, Qing },\n title = { { HRDecoder: High-Resolution Decoder Network for Fundus Image Lesion Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"High resolution is crucial for precise segmentation in fundus images, yet handling high-resolution inputs incurs considerable GPU memory costs, with diminishing performance gains as overhead increases. To address this issue while tackling the challenge of segmenting tiny objects, recent studies have explored local-global feature fusion methods. These methods preserve fine details using local regions and capture context information from downscaled global images. However, the necessity of multiple forward passes inevitably incurs significant computational overhead, greatly affecting inference speed. In this paper, we propose HRDecoder, a simple High-Resolution Decoder network for fundus image segmentation. It integrates a High-resolution Representation Learning (HRL) module to capture fine-grained local features and a High-resolution Feature Fusion (HFF) module to fuse multi-scale local-global feature maps. HRDecoder effectively improves the overall segmentation accuracy of fundus lesions while maintaining reasonable memory usage, computational overhead, and inference speed. Experimental results on the IDRID and DDR datasets demonstrate the effectiveness of our method. 
The code is available at https:\/\/github.com\/CVIU-CSU\/HRDecoder.", "title":"HRDecoder: High-Resolution Decoder Network for Fundus Image Lesion Segmentation", "authors":[ "Ding, Ziyuan", "Liang, Yixiong", "Kan, Shichao", "Liu, Qing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CVIU-CSU\/HRDecoder" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":45 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4063_paper.pdf", "bibtext":"@InProceedings{ Wan_AHyperreflective_MICCAI2024,\n author = { Wang, Xingguo and Ma, Yuhui and Guo, Xinyu and Zheng, Yalin and Zhang, Jiong and Liu, Yonghuai and Zhao, Yitian },\n title = { { A Hyperreflective Foci Segmentation Network for OCT Images with Multi-dimensional Semantic Enhancement } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diabetic macular edema (DME) is a leading cause of vision loss worldwide. Optical Coherence Tomography (OCT) serves as a widely accepted imaging tool for diagnosing DME due to its non-invasiveness and high resolution cross-sectional view. Clinical evaluation of Hyperreflective Foci (HRF) in OCT contributes to understanding the origins of DME and predicting disease progression or treatment efficacy. However, limited information and a significant imbalance between foreground and background in HRF present challenges for its precise segmentation in OCT images. In this study, we propose an attention mechanism-based MUlti-dimensional Semantic Enhancement Network (MUSE-Net) for HRF segmentation to address these challenges. Specifically, our MUSE-Net comprises attention-based multi-dimensional semantic information enhancement modules and class-imbalance-insensitive joint loss. The adaptive region guidance module softly allocates regional importance in slice, enriching the single-slice semantic information. The adjacent slice guidance module exploits the remote information across consecutive slices, enriching the multi-dimensional semantic information. Class-imbalance-insensitive joint loss combines pixel-level perception optimization with image-level considerations, alleviating the gradient dominance of the background during model training. Our experimental results demonstrate that MUSE-Net outperforms existing methods over two datasets respectively. 
To further promote reproducible research, we have made the code and both datasets publicly available online.", "title":"A Hyperreflective Foci Segmentation Network for OCT Images with Multi-dimensional Semantic Enhancement", "authors":[ "Wang, Xingguo", "Ma, Yuhui", "Guo, Xinyu", "Zheng, Yalin", "Zhang, Jiong", "Liu, Yonghuai", "Zhao, Yitian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/iMED-Lab\/MUSEnet-Pytorch" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":46 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4133_paper.pdf", "bibtext":"@InProceedings{ Qua_CausalityInformed_MICCAI2024,\n author = { Quan, Yuyang and Zhang, Chencheng and Guo, Rui and Qian, Xiaohua },\n title = { { Causality-Informed Fusion Network for Automated Assessment of Parkinsonian Body Bradykinesia } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Body bradykinesia, a prominent clinical manifestation of Parkinson\u2019s disease (PD), characterizes a generalized slowness and diminished movement across the entire body. The assessment of body bradykinesia in the widely employed PD rating scale (MDS-UPDRS) is inherently subjective, relying on the examiner\u2019s overall judgment rather than specific motor tasks. Therefore, we propose a graph convolutional network (GCN) scheme for automated video-based assessment of parkinsonian body bradykinesia. This scheme incorporates a causality-informed fusion network to enhance the fusion of causal components within gait and leg-agility motion features, achieving stable multi-class assessment of body bradykinesia. Specifically, an adaptive causal feature selection module is developed to extract pertinent features for body bradykinesia assessment, effectively mitigating the influence of non-causal features. Simultaneously, a causality-informed optimization strategy is designed to refine the causality feature selection module, improving its capacity to capture causal features. Our method achieves 61.07% accuracy for three-class assessment on a dataset of 876 clinical cases. 
Notably, our proposed scheme, utilizing only consumer-level cameras, holds significant promise for remote PD bradykinesia assessment.", "title":"Causality-Informed Fusion Network for Automated Assessment of Parkinsonian Body Bradykinesia", "authors":[ "Quan, Yuyang", "Zhang, Chencheng", "Guo, Rui", "Qian, Xiaohua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":47 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1063_paper.pdf", "bibtext":"@InProceedings{ Men_Genomicsguided_MICCAI2024,\n author = { Meng, Fangliangzi and Zhang, Hongrun and Yan, Ruodan and Chuai, Guohui and Li, Chao and Liu, Qi },\n title = { { Genomics-guided Representation Learning for Pathologic Pan-cancer Tumor Microenvironment Subtype Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"The characterization of Tumor MicroEnvironment (TME) is challenging due to its complexity and heterogeneity. Relatively consistent TME characteristics are embedded within highly specific tissue features, rendering them difficult to predict. The capability to accurately classify TME subtypes is of critical significance for clinical tumor diagnosis and precision medicine. Based on the observation that tumors with different origins share similar microenvironment patterns, we propose PathoTME, a genomics-guided representation learning framework employing Whole Slide Image (WSI) for pan-cancer TME subtype prediction. Specifically, we utilize a Siamese network to leverage genomic information as a regularization factor to assist WSI embedding learning during the training phase. Additionally, we employ a Domain Adversarial Neural Network (DANN) to mitigate the impact of tissue type variations. To eliminate domain bias, a dynamic WSI prompt is designed to further unleash the model\u2019s capabilities. Our model achieves better performance than other state-of-the-art methods across 23 cancer types on the TCGA dataset. 
The related code will be released.", "title":"Genomics-guided Representation Learning for Pathologic Pan-cancer Tumor Microenvironment Subtype Prediction", "authors":[ "Meng, Fangliangzi", "Zhang, Hongrun", "Yan, Ruodan", "Chuai, Guohui", "Li, Chao", "Liu, Qi" ], "id":"Conference", "arxiv_id":"2406.06517", "GitHub":[ "https:\/\/github.com\/Mengflz\/PathoTME" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":48 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1802_paper.pdf", "bibtext":"@InProceedings{ Fan_ADomain_MICCAI2024,\n author = { Fan, Xiaoya and Xu, Pengzhi and Zhao, Qi and Hao, Chenru and Zhao, Zheng and Wang, Zhong },\n title = { { A Domain Adaption Approach for EEG-based Automated Seizure Classification with Temporal-Spatial-Spectral Attention } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Electroencephalography (EEG) based automated seizure classification can significantly ameliorate seizure diagnosis and treatment. However, the intra- and inter- subject variability in EEG data make it a challenging task. Especially, a model trained on data from multiple subjects typically degenerates when applied to new subjects. In this study, we propose an attention based deep convolutional neural network with domain adaption to tackle these issues. The model is able to learn domain-invariant temporal-spatial-spectral (TSS) features by jointly optimizing a feature extractor, a seizure classifier and a domain discriminator. The feature extractor extracts multi-level TSS features by an attention module. The domain discriminator is designed to determine which domain, i.e., source or target, the features come from. With a gradient reversal layer, it allows extraction of domain-invariant features. Thus, the classifier is able to give accurate prediction for unseen subjects by leveraging knowledge learned from the source domain. We evaluated our approach using the Temple University Hospital EEG Seizure Corpus (TUSZ) v1.5.2. Results demonstrate that the proposed approach achieves the state-of-the-art performance on seizure classification. 
The code is available at https:\/\/github.com\/Dondlut\/EEG_DOMAIN.", "title":"A Domain Adaption Approach for EEG-based Automated Seizure Classification with Temporal-Spatial-Spectral Attention", "authors":[ "Fan, Xiaoya", "Xu, Pengzhi", "Zhao, Qi", "Hao, Chenru", "Zhao, Zheng", "Wang, Zhong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Dondlut\/EEG_DOMAIN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":49 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0736_paper.pdf", "bibtext":"@InProceedings{ Li_SelfsupervisedDenoising_MICCAI2024,\n author = { Li, Zhenghong and Ren, Jiaxiang and Zou, Zhilin and Garigapati, Kalyan and Du, Congwu and Pan, Yingtian and Ling, Haibin },\n title = { { Self-supervised Denoising and Bulk Motion Artifact Removal of 3D Optical Coherence Tomography Angiography of Awake Brain } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Denoising of 3D Optical Coherence Tomography Angiography (OCTA) for awake brain microvasculature is challenging. An OCTA volume is scanned slice by slice, with each slice (named B-scan) derived from dynamic changes in successively acquired OCT images. A B-scan of an awake brain often suffers from complex noise and Bulk Motion Artifacts (BMA), severely degrading image quality. Also, acquiring clean B-scans for training is difficult. Fortunately, we observe that the slice-wise imaging procedure makes the noise mostly independent across B-scans while preserving the continuity of vessel (including capillary) signals across B-scans. Thus inspired, we propose a novel blind-slice self-supervised learning method to denoise 3D brain OCTA volumes slice by slice. For each B-scan slice, named the center B-scan, we mask it entirely black and train the network to recover the original center B-scan using its neighboring B-scans. To enhance the BMA removal performance, we adaptively select only BMA-free center B-scans for model training. We further propose two novel refinement methods: (1) a non-local block to enhance vessel continuity and (2) a weighted loss to improve vascular contrast. 
To the best of our knowledge, this is the first self-supervised 3D OCTA denoising method that effectively reduces both complex noise and BMA while preserving capillary signals in brain OCTA volumes.", "title":"Self-supervised Denoising and Bulk Motion Artifact Removal of 3D Optical Coherence Tomography Angiography of Awake Brain", "authors":[ "Li, Zhenghong", "Ren, Jiaxiang", "Zou, Zhilin", "Garigapati, Kalyan", "Du, Congwu", "Pan, Yingtian", "Ling, Haibin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ZhenghLi\/SOAD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":50 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0750_paper.pdf", "bibtext":"@InProceedings{ Jin_Diff3Dformer_MICCAI2024,\n author = { Jin, Zihao and Fang, Yingying and Huang, Jiahao and Xu, Caiwen and Walsh, Simon and Yang, Guang },\n title = { { Diff3Dformer: Leveraging Slice Sequence Diffusion for Enhanced 3D CT Classification with Transformer Networks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"The manifestation of symptoms associated with lung diseases can vary in different depths for individual patients, highlighting the significance of 3D information in CT scans for medical image classification. While Vision Transformer has shown superior performance over convolutional neural networks in image classification tasks, their effectiveness is often demonstrated on sufficiently large 2D datasets and they easily encounter overfitting issues on small medical image datasets. To address this limitation, we propose a Diffusion-based 3D Vision Transformer (Diff3Dformer), which utilizes the latent space of the Diffusion model to form the slice sequence for 3D analysis and incorporates clustering attention into ViT to aggregate repetitive information within 3D CT scans, thereby harnessing the power of the advanced transformer in 3D classification tasks on small datasets. Our method exhibits improved performance on two different scales of small datasets of 3D lung CT scans, surpassing the state of the art 3D methods and other transformer-based approaches that emerged during the COVID-19 pandemic, demonstrating its robust and superior performance across different scales of data. Experimental results underscore the superiority of our proposed method, indicating its potential for enhancing medical image classification tasks in real-world scenarios. 
\nThe code will be publicly available at https:\/\/github.com\/ayanglab\/Diff3Dformer.", "title":"Diff3Dformer: Leveraging Slice Sequence Diffusion for Enhanced 3D CT Classification with Transformer Networks", "authors":[ "Jin, Zihao", "Fang, Yingying", "Huang, Jiahao", "Xu, Caiwen", "Walsh, Simon", "Yang, Guang" ], "id":"Conference", "arxiv_id":"2406.17173", "GitHub":[ "https:\/\/github.com\/ayanglab\/Diff3Dformer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":51 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2954_paper.pdf", "bibtext":"@InProceedings{ Van_Towards_MICCAI2024,\n author = { Vanneste, F\u00e9lix and Martin, Claire and Goury, Olivier and Courtecuisse, Hadrien and Pernod, Erik and Cotin, St\u00e9phane and Duriez, Christian },\n title = { { Towards realistic needle insertion training simulator using partitioned model order reduction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Needle-based intervention is part of minimally invasive surgery\nand has the benefit of allowing the reach of deep internal organ structures while limiting trauma. However, reaching good performance requires a skilled practitioner. This paper presents a needle-insertion training simulator for the liver based on the finite element method. One of the main challenges in developing realistic training simulators is to use fine meshes to represent organ deformations accurately while keeping a real-time constraint in the speed of computation to allow interactivity of the simulator. This is especially true for simulating accurately the region of the organs where the needle is inserted. In this paper, we propose the use of model order reduction to allow drastic gains in performance. To simulate accurately the liver which undergoes highly nonlinear local deformation along the needle-insertion path, we propose a new partition method for model order reduction: applied to the liver, we can perform FEM computations on a high-resolution mesh on the part in interaction with the needle while having model reduction elsewhere for greater computational performances. We show the combined methods with an interactive simulation of percutaneous needle-based interventions for tumor biopsy\/ablation using patient-based anatomy.", "title":"Towards realistic needle insertion training simulator using partitioned model order reduction", "authors":[ "Vanneste, F\u00e9lix", "Martin, Claire", "Goury, Olivier", "Courtecuisse, Hadrien", "Pernod, Erik", "Cotin, St\u00e9phane", "Duriez, Christian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/SofaDefrost\/ModelOrderReduction" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":52 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3110_paper.pdf", "bibtext":"@InProceedings{ P._Domain_MICCAI2024,\n author = { P. 
Garc\u00eda-de-la-Puente, Natalia and L\u00f3pez-P\u00e9rez, Miguel and Launet, La\u00ebtitia and Naranjo, Valery },\n title = { { Domain Adaptation for Unsupervised Cancer Detection: An application for skin Whole Slides Images from an interhospital dataset } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Skin cancer diagnosis relies on assessing the histopathological appearance of skin cells and the patterns of epithelial skin tissue architecture. Despite recent advancements in deep learning for automating skin cancer detection, two main challenges persist for their clinical deployment. (1) Deep learning models only recognize the classes trained on, giving arbitrary predictions for rare or unknown diseases. (2) Generalization across healthcare institutions is difficult, as variations arising from diverse scanners and staining procedures increase the task complexity.\nWe propose a novel Domain Adaptation method for Unsupervised cancer Detection (DAUD) using whole slide images to address these concerns. Our method consists of an autoencoder-based model with stochastic latent variables that reflect each institution\u2019s features. \nWe have validated DAUD in a real-world dataset from two different hospitals. In addition, we utilized an external dataset to evaluate the capability for out-of-distribution detection. DAUD demonstrates comparable or superior performance to the state-of-the-art methods for anomaly detection.", "title":"Domain Adaptation for Unsupervised Cancer Detection: An application for skin Whole Slides Images from an interhospital dataset", "authors":[ "P. Garc\u00eda-de-la-Puente, Natalia", "L\u00f3pez-P\u00e9rez, Miguel", "Launet, La\u00ebtitia", "Naranjo, Valery" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/cvblab\/DAUD-MICCAI2024" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":53 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1443_paper.pdf", "bibtext":"@InProceedings{ Liu_MultiModal_MICCAI2024,\n author = { Liu, Shuting and Zhang, Baochang and Zimmer, Veronika A. and Rueckert, Daniel },\n title = { { Multi-Modal Data Fusion with Missing Data Handling for Mild Cognitive Impairment Progression Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Predicting Mild Cognitive Impairment (MCI) progression, an early stage of Alzheimer\u2019s Disease (AD), is crucial but challenging due to the disease\u2019s complexity. Integrating diverse data sources like clinical assessments and neuroimaging poses hurdles, particularly with data preprocessing and handling missing data. When data is missing, it can introduce uncertainty and reduce the effectiveness of statistical models. Moreover, ignoring missing data or handling it improperly can distort results and compromise the validity of research findings. In this paper, we introduce a novel fusion model that handles missing data for early diagnosis of AD. 
This includes a novel image-to-graphical representation module that considers the heterogeneity of brain anatomy, and a missing data compensation module. In the image-to-graphical representation module, we construct a subject-specific graph representing the connectivity among 100 brain regions derived from structural MRI, incorporating the feature maps extracted by segmentation network into the node features. We also propose a novel multi-head dynamic graph convolution network to further extract graphical features. In the missing data compensation module, a self-supervised model is designed to compensate for partially missing information, alongside a latent-space transfer model tailored for cases where tabular data is completely missing. Experimental results on ADNI dataset with 696 subjects demonstrate the superiority of our proposed method over existing state-of-the-art methods. Our method achieves a balanced accuracy of 92.79% on clinical data with partially missing cases and an impressive 92.35% even without clinical data input.", "title":"Multi-Modal Data Fusion with Missing Data Handling for Mild Cognitive Impairment Progression Prediction", "authors":[ "Liu, Shuting", "Zhang, Baochang", "Zimmer, Veronika A.", "Rueckert, Daniel" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":54 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2150_paper.pdf", "bibtext":"@InProceedings{ Liu_Medical_MICCAI2024,\n author = { Liu, Yishu and Wu, Zhongqi and Chen, Bingzhi and Zhang, Zheng and Lu, Guangming },\n title = { { Medical Cross-Modal Prompt Hashing with Robust Noisy Correspondence Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the realm of medical data analysis, medical cross-modal hashing (Med-CMH) has emerged as a promising approach to facilitate fast similarity search across multi-modal medical data. However, due to human subjective deviation or semantic ambiguity, the presence of noisy correspondence across medical modalities exacerbates the challenge of the heterogeneous gap in cross-modal learning. To eliminate clinical noisy correspondence, this paper proposes a novel medical cross-modal prompt hashing (MCPH) that incorporates multi-modal prompt optimization with noise-robust contrastive constraint for facilitating noisy correspondence issues. Benefitting from the robust reasoning capabilities inherent in medical large-scale models, we design a visual-textual prompt learning paradigm to collaboratively enhance alignment and contextual awareness between the medical visual and textual representations. By providing targeted prompts and cues from the medical large language model (LLM), i.e., CheXagent, multi-modal prompt learning facilitates the extraction of relevant features and associations, empowering the model with actionable insights and decision support. 
Furthermore, a noise-robust contrastive learning strategy is dedicated to dynamically adjusting the intensity of contrastive learning across modalities, thereby enhancing the contrast strength of positive pairs while mitigating the influence of noisy correspondence pairs. Extensive experiments on multiple benchmark datasets demonstrate that our MCPH surpasses the state-of-the-art baselines.", "title":"Medical Cross-Modal Prompt Hashing with Robust Noisy Correspondence Learning", "authors":[ "Liu, Yishu", "Wu, Zhongqi", "Chen, Bingzhi", "Zhang, Zheng", "Lu, Guangming" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":55 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0746_paper.pdf", "bibtext":"@InProceedings{ Wan_Correlationadaptive_MICCAI2024,\n author = { Wan, Peng and Zhang, Shukang and Shao, Wei and Zhao, Junyong and Yang, Yinkai and Kong, Wentao and Xue, Haiyan and Zhang, Daoqiang },\n title = { { Correlation-adaptive Multi-view CEUS Fusion for Liver Cancer Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Dual-screen contrast-enhanced ultrasound (CEUS) has been the first-line imaging technique for the differential diagnosis of primary liver cancer (PLC), since it combines imaging of tumor micro-circulation perfusion with the anatomic features of the B-mode ultrasound (BUS) view. Although previous multi-view learning methods have shown their potential to boost diagnostic efficacy, correlation variances of different views among subjects are largely underestimated, arising from the varying imaging quality of different views and the presence or absence of valuable findings. In this paper, we propose a correlation-adaptive multi-view fusion method (CAMVF) for dual-screen CEUS-based PLC diagnosis. Towards a reliable fusion of multi-view CEUS findings (i.e., BUS, CEUS and its parametric imaging), our method dynamically assesses the correlation of each view based on the prediction confidence itself and prediction consistency among views. Specifically, we first obtain the confidence of each view with evidence-based uncertainty estimation, then divide them into credible and incredible views based on cross-view consistency, and finally ensemble views with weights adaptive to their credibility. In this retrospective study, we collected CEUS imaging from 238 liver cancer patients in total, and our method achieves superior diagnostic accuracy and specificity of 88.33% and 92.48%, respectively, demonstrating its efficacy for PLC differential diagnosis. 
Our code is available at https:\/\/github.com\/shukangzh\/CAMVF.", "title":"Correlation-adaptive Multi-view CEUS Fusion for Liver Cancer Diagnosis", "authors":[ "Wan, Peng", "Zhang, Shukang", "Shao, Wei", "Zhao, Junyong", "Yang, Yinkai", "Kong, Wentao", "Xue, Haiyan", "Zhang, Daoqiang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/shukangzh\/CAMVF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":56 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2948_paper.pdf", "bibtext":"@InProceedings{ Che_Striving_MICCAI2024,\n author = { Chen, Yaxiong and Wang, Yujie and Zheng, Zixuan and Hu, Jingliang and Shi, Yilei and Xiong, Shengwu and Zhu, Xiao Xiang and Mou, Lichao },\n title = { { Striving for Simplicity: Simple Yet Effective Prior-Aware Pseudo-Labeling for Semi-Supervised Ultrasound Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical ultrasound imaging is ubiquitous, but manual analysis struggles to keep pace. Automated segmentation can help but requires large labeled datasets, which are scarce. Semi-supervised learning leveraging both unlabeled and limited labeled data is a promising approach. State-of-the-art methods use consistency regularization or pseudo-labeling but grow increasingly complex. Without sufficient labels, these models often latch onto artifacts or allow anatomically implausible segmentations. In this paper, we present a simple yet effective pseudo-labeling method with an adversarially learned shape prior to regularize segmentations. Specifically, we devise an encoder-twin-decoder network where the shape prior acts as an implicit shape model, penalizing anatomically implausible but not ground-truth-deviating predictions. Without bells and whistles, our simple approach achieves state-of-the-art performance on two benchmarks under different partition protocols. We provide a strong baseline for future semi-supervised medical image segmentation. 
Code is available at https:\/\/github.com\/WUTCM-Lab\/Shape-Prior-Semi-Seg.", "title":"Striving for Simplicity: Simple Yet Effective Prior-Aware Pseudo-Labeling for Semi-Supervised Ultrasound Image Segmentation", "authors":[ "Chen, Yaxiong", "Wang, Yujie", "Zheng, Zixuan", "Hu, Jingliang", "Shi, Yilei", "Xiong, Shengwu", "Zhu, Xiao Xiang", "Mou, Lichao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/WUTCM-Lab\/Shape-Prior-Semi-Seg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":57 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2508_paper.pdf", "bibtext":"@InProceedings{ Kwo_AnatomicallyGuided_MICCAI2024,\n author = { Kwon, Junmo and Seo, Sang Won and Park, Hyunjin },\n title = { { Anatomically-Guided Segmentation of Cerebral Microbleeds in T1-weighted and T2*-weighted MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cerebral microbleeds (CMBs) are defined as relatively small blood depositions in the brain that serve as severity indicators of small vessel diseases, and thus accurate quantification of CMBs is clinically useful. However, manual annotation of CMBs is an extreme burden for clinicians due to their small size and the potential risk of misclassification. Moreover, the extreme class imbalance inherent in CMB segmentation tasks presents a significant challenge for training deep neural networks. In this paper, we propose to enhance CMB segmentation performance by introducing a proxy task of segmentation of supratentorial and infratentorial regions. This proxy task could leverage clinical prior knowledge in the identification of CMBs. We evaluated the proposed model using an in-house dataset comprising 335 subjects with 582 longitudinal cases and an external public dataset consisting of 72 cases. Our method performed better than other methods that did not consider proxy tasks. Quantitative results indicate that the proxy task is robust on unseen datasets and thus effective in reducing false positives. 
Our code is available at https:\/\/github.com\/junmokwon\/AnatGuidedCMBSeg.", "title":"Anatomically-Guided Segmentation of Cerebral Microbleeds in T1-weighted and T2*-weighted MRI", "authors":[ "Kwon, Junmo", "Seo, Sang Won", "Park, Hyunjin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/junmokwon\/AnatGuidedCMBSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":58 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1351_paper.pdf", "bibtext":"@InProceedings{ Wan_Joint_MICCAI2024,\n author = { Wang, Zhicheng and Li, Jiacheng and Chen, Yinda and Shou, Jiateng and Deng, Shiyu and Huang, Wei and Xiong, Zhiwei },\n title = { { Joint EM Image Denoising and Segmentation with Instance-aware Interaction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"In large scale electron microscopy(EM), the demand for rapid imaging often results in significant amounts of imaging noise, which considerably compromise segmentation accuracy. While conventional approaches typically incorporate denoising as a preliminary stage, there is limited exploration into the potential synergies between denoising and segmentation processes. To bridge this gap, we propose an instance-aware interaction framework to tackle EM image denoising and segmentation simultaneously, aiming at mutual enhancement between the two tasks. Specifically, our framework comprises three components: a denoising network, a segmentation network, and a fusion network facilitating feature-level interaction. Firstly, the denoising network mitigates noise degradation. Subsequently, the segmentation network learns an instance-level affinity prior, encoding vital spatial structural information. Finally, in the fusion network, we propose a novel Instance-aware Embedding Module (IEM) to utilize vital spatial structure information from segmentation features for denoising. IEM enables interaction between the two tasks within a unified framework, which also facilitates implicit feedback from denoising for segmentation with a joint training mechanism. Through extensive experiments across multiple datasets, our framework demonstrates substantial performance improvements over existing solutions. Moreover, our framework exhibits strong generalization capabilities across different network architectures. 
Code is available at https:\/\/github.com\/zhichengwang-tri\/EM-DenoiSeg.", "title":"Joint EM Image Denoising and Segmentation with Instance-aware Interaction", "authors":[ "Wang, Zhicheng", "Li, Jiacheng", "Chen, Yinda", "Shou, Jiateng", "Deng, Shiyu", "Huang, Wei", "Xiong, Zhiwei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zhichengwang-tri\/EM-DenoiSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":59 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2778_paper.pdf", "bibtext":"@InProceedings{ Pen_GBT_MICCAI2024,\n author = { Peng, Zhihao and He, Zhibin and Jiang, Yu and Wang, Pengyu and Yuan, Yixuan },\n title = { { GBT: Geometric-oriented Brain Transformer for Autism Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Human brains are typically modeled as networks of Regions of Interest (ROI) to comprehend brain functional Magnetic Resonance Imaging (fMRI) connectome for Autism diagnosis. Recently, various deep neural network-based models have been developed to learn the representation of ROIs, achieving impressive performance improvements. However, they (i) heavily rely on increasingly complex network architecture with an obscure learning mechanism, or (ii) solely utilize the cross-entropy loss to supervise the training process, leading to sub-optimal performance. To this end, we propose a simple and effective Geometric-oriented Brain Transformer (GBT) with the Attention Weight Matrix Approximation (AWMA)-based transformer module and the geometric-oriented representation learning module for brain fMRI connectome analysis. Specifically, the AWMA-based transformer module selectively removes the components of the attention weight matrix with smaller singular values, aiming to learn the most relevant and representative graph representation. The geometric-oriented representation learning module imposes low-rank intra-class compactness and high-rank inter-class diversity constraints on learned representations to promote that to be discriminative. Experimental results on the ABIDE dataset validate that our method GBT consistently outperforms state-of-the-art approaches. 
The code is available at https:\/\/github.com\/CUHK-AIM-Group\/GBT.", "title":"GBT: Geometric-oriented Brain Transformer for Autism Diagnosis", "authors":[ "Peng, Zhihao", "He, Zhibin", "Jiang, Yu", "Wang, Pengyu", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/GBT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":60 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3878_paper.pdf", "bibtext":"@InProceedings{ Bae_HoGNet_MICCAI2024,\n author = { Bae, Joseph and Kapse, Saarthak and Zhou, Lei and Mani, Kartik and Prasanna, Prateek },\n title = { { HoG-Net: Hierarchical Multi-Organ Graph Network for Head and Neck Cancer Recurrence Prediction from CT Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"In many diseases including head and neck squamous cell carcinoma (HNSCC), pathologic processes are not limited to a single region of interest, but instead encompass surrounding anatomical structures and organs outside of the tumor. To model information from organs-at-risk (OARs) as well as from the primary tumor, we present a Hierarchical Multi-Organ Graph Network (HoG-Net) for medical image modeling which we leverage to predict locoregional tumor recurrence (LR) for HNSCC patients. HoG-Net is able to model local features from individual OARs and then constructs a holistic global representation of interactions between features from multiple OARs in a single image. HoG-Net\u2019s prediction of LR for HNSCC patients is evaluated in a largest yet studied dataset of N=2,741 patients from six institutions, and outperforms several previously published baselines. 
Further, HoG-Net allows insights into which OARs are significant in predicting LR, providing specific OAR-level interpretability rather than the coarse patch-level interpretability provided by other methods.", "title":"HoG-Net: Hierarchical Multi-Organ Graph Network for Head and Neck Cancer Recurrence Prediction from CT Images", "authors":[ "Bae, Joseph", "Kapse, Saarthak", "Zhou, Lei", "Mani, Kartik", "Prasanna, Prateek" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/bmi-imaginelab\/HoGNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":61 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1451_paper.pdf", "bibtext":"@InProceedings{ Den_HATs_MICCAI2024,\n author = { Deng, Ruining and Liu, Quan and Cui, Can and Yao, Tianyuan and Xiong, Juming and Bao, Shunxing and Li, Hao and Yin, Mengmeng and Wang, Yu and Zhao, Shilin and Tang, Yucheng and Yang, Haichun and Huo, Yuankai },\n title = { { HATs: Hierarchical Adaptive Taxonomy Segmentation for Panoramic Pathology Image Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Panoramic image segmentation in computational pathology presents a remarkable challenge due to the morphologically complex and variably scaled anatomy. For instance, the intricate organization in kidney pathology spans multiple layers, from regions like the cortex and medulla to functional units such as glomeruli, tubules, and vessels, down to various cell types. In this paper, we propose a novel Hierarchical Adaptive Taxonomy Segmentation (HATs) method, which is designed to thoroughly segment panoramic views of kidney structures by leveraging detailed anatomical insights. Our approach entails (1) the innovative HATS technique which translates spatial relationships among 15 distinct object classes into a versatile \u201cplug-and-play\u201d loss function that spans across regions, functional units, and cells, (2) the incorporation of anatomical hierarchies and scale considerations into a unified simple matrix representation for all panoramic entities, (3) the adoption of the latest AI foundation model (EfficientSAM) as a feature extraction tool to boost the model\u2019s adaptability, yet eliminating the need for manual prompt generation in conventional segment anything model (SAM). Experimental findings demonstrate that the HATS method offers an efficient and effective strategy for integrating clinical insights and imaging precedents into a unified segmentation model across more than 15 categories. 
The official implementation is publicly available at https:\/\/github.com\/hrlblab\/HATs.", "title":"HATs: Hierarchical Adaptive Taxonomy Segmentation for Panoramic Pathology Image Analysis", "authors":[ "Deng, Ruining", "Liu, Quan", "Cui, Can", "Yao, Tianyuan", "Xiong, Juming", "Bao, Shunxing", "Li, Hao", "Yin, Mengmeng", "Wang, Yu", "Zhao, Shilin", "Tang, Yucheng", "Yang, Haichun", "Huo, Yuankai" ], "id":"Conference", "arxiv_id":"2407.00596", "GitHub":[ "https:\/\/github.com\/hrlblab\/HATs" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":62 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3323_paper.pdf", "bibtext":"@InProceedings{ Bau_Deep_MICCAI2024,\n author = { Baumann, Alexander and Ayala, Leonardo and Studier-Fischer, Alexander and Sellner, Jan and \u00d6zdemir, Berkin and Kowalewski, Karl-Friedrich and Ilic, Slobodan and Seidlitz, Silvia and Maier-Hein, Lena },\n title = { { Deep intra-operative illumination calibration of hyperspectral cameras } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Hyperspectral imaging (HSI) is emerging as a promising novel imaging modality with various potential surgical applications. Currently available cameras, however, suffer from poor integration into the clinical workflow because they require the lights to be switched off, or the camera to be manually recalibrated as soon as lighting conditions change. Given this critical bottleneck, the contribution of this paper is threefold: (1) We demonstrate that dynamically changing lighting conditions in the operating room dramatically affect the performance of HSI applications, namely physiological parameter estimation, and surgical scene segmentation. (2) We propose a novel learning-based approach to automatically recalibrating hyperspectral images during surgery and show that it is sufficiently accurate to replace the tedious process of white reference-based recalibration. (3) Based on a total of 742 HSI cubes from a phantom, porcine models, and rats we show that our recalibration method not only outperforms previously proposed methods, but also generalizes across species, lighting conditions, and image processing tasks. 
Due to its simple workflow integration as well as high accuracy, speed, and generalization capabilities, our method could evolve as a central component in clinical surgical HSI.", "title":"Deep intra-operative illumination calibration of hyperspectral cameras", "authors":[ "Baumann, Alexander", "Ayala, Leonardo", "Studier-Fischer, Alexander", "Sellner, Jan", "\u00d6zdemir, Berkin", "Kowalewski, Karl-Friedrich", "Ilic, Slobodan", "Seidlitz, Silvia", "Maier-Hein, Lena" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":63 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3549_paper.pdf", "bibtext":"@InProceedings{ Wal_Multisequence_MICCAI2024,\n author = { Walsh, Ricky and Gaubert, Malo and Meur\u00e9e, C\u00e9dric and Hussein, Burhan Rashid and Kerbrat, Anne and Casey, Romain and Comb\u00e8s, Benoit and Galassi, Francesca },\n title = { { Multi-sequence learning for multiple sclerosis lesion segmentation in spinal cord MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated tools developed to detect multiple sclerosis lesions in spinal cord MRI have thus far been based on processing single MR sequences in a deep learning model. This study is the first to explore a multi-sequence approach to this task and we propose a method to address inherent issues in multi-sequence spinal cord data, i.e., differing fields of view, inter-sequence alignment and incomplete sequence data for training and inference. In particular, we investigate a simple missing-modality method of replacing missing features with the mean over the available sequences. This approach leads to better segmentation results when processing a single sequence at inference than a model trained directly on that sequence, and our experiments provide valuable insights into the mechanism underlying this surprising result. In particular, we demonstrate that both the encoder and decoder benefit from the variability introduced in the multi-sequence setting. 
Additionally, we propose a latent feature augmentation scheme to reproduce this variability in a single-sequence setting, resulting in similar improvements over the single-sequence baseline.", "title":"Multi-sequence learning for multiple sclerosis lesion segmentation in spinal cord MRI", "authors":[ "Walsh, Ricky", "Gaubert, Malo", "Meur\u00e9e, C\u00e9dric", "Hussein, Burhan Rashid", "Kerbrat, Anne", "Casey, Romain", "Comb\u00e8s, Benoit", "Galassi, Francesca" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":64 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1172_paper.pdf", "bibtext":"@InProceedings{ Lv_Aligning_MICCAI2024,\n author = { Lv, Yanan and Jia, Haoze and Chen, Xi and Yan, Haiyang and Han, Hua },\n title = { { Aligning and Restoring Imperfect ssEM images for Continuity Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The serial section electron microscopy reconstruction method is commonly used in large volume reconstruction of biological tissue, but the inevitable section damage brings challenges to volume reconstruction. The section damage may result in imperfect section alignment and affect the subsequent neuron segmentation and data analysis. This paper proposes an aligning and restoring method for imperfect sections, which contributes to promoting the continuity reconstruction of biological tissues. To align imperfect sections, we improve the optical flow network to address the difficulties faced by traditional optical flow networks in handling issues related to discontinuous deformations and large displacements in the alignment of imperfect sections. Based on the deformations in different regions, the Guided Position of each coordinate point on the section is estimated to generate the Guided Field of the imperfect section. This Guided field aids the optical flow network in better handling the complex deformation and large displacement associated with the damaged area during alignment. Subsequently, the damaged region is predicted and seamlessly integrated into the aligned imperfect section images, ultimately obtaining aligned damage-free section images. Experimental results demonstrate that the proposed method effectively resolves the alignment and restoration issues of imperfect sections, achieving better alignment accuracy than existing methods and significantly improving neuron segmentation accuracy. 
Our code is available at https:\/\/github.com\/lvyanan525\/Aligning-and-Restoring-Imperfect-ssEM-images.", "title":"Aligning and Restoring Imperfect ssEM images for Continuity Reconstruction", "authors":[ "Lv, Yanan", "Jia, Haoze", "Chen, Xi", "Yan, Haiyang", "Han, Hua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/lvyanan525\/Aligning-and-Restoring-Imperfect-ssEM-images" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":65 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1033_paper.pdf", "bibtext":"@InProceedings{ Kim_Semisupervised_MICCAI2024,\n author = { Kim, Eunjin and Kwon, Gitaek and Kim, Jaeyoung and Park, Hyunjin },\n title = { { Semi-supervised Segmentation through Rival Networks Collaboration with Saliency Map in Diabetic Retinopathy } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automatic segmentation of diabetic retinopathy (DR) lesions in retinal images has a translational impact. However, collecting pixel-level annotations for supervised learning is labor-intensive. Thus, semi-supervised learning (SSL) methods tapping into the abundance of unlabeled images have been widely accepted. Still, a blind application of SSL is problematic due to the confirmation bias stemming from unreliable pseudo masks and class imbalance. To address these concerns, we propose a Rival Networks Collaboration with Saliency Map (RiCo) for multi-lesion segmentation in retinal images for DR. From two competing networks, we declare a victor network based on Dice coefficient onto which the defeated network is aligned when exploiting unlabeled images. Recognizing that this competition might overlook small lesions, we equip rival networks with distinct weight systems for imbalanced and underperforming classes. The victor network dynamically guides the defeated network by complementing its weaknesses and mimicking the victor\u2019s strengths. This process fosters effective collaborative growth through meaningful knowledge exchange. Furthermore, we incorporate a saliency map, highlighting color-striking structures, into consistency loss to significantly enhance alignment in structural and critical areas for retinal images. This approach improves reliability and stability by minimizing the influence of unreliable areas of the pseudo mask. A comprehensive comparison with state-of-the-art SSL methods demonstrates our method\u2019s superior performance on two datasets (IDRiD and e-ophtha). 
Our code is available at https:\/\/github.com\/eunjinkim97\/SSL_DRlesion.", "title":"Semi-supervised Segmentation through Rival Networks Collaboration with Saliency Map in Diabetic Retinopathy", "authors":[ "Kim, Eunjin", "Kwon, Gitaek", "Kim, Jaeyoung", "Park, Hyunjin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/eunjinkim97\/SSL_DRlesion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":66 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3447_paper.pdf", "bibtext":"@InProceedings{ Zor_EnhancedquickDWI_MICCAI2024,\n author = { Zormpas-Petridis, Konstantinos and Candito, Antonio and Messiou, Christina and Koh, Dow-Mu and Blackledge, Matthew D. },\n title = { { Enhanced-quickDWI: Achieving equivalent clinical quality by denoising heavily sub-sampled diffusion-weighted imaging data } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Whole-body diffusion-weighted imaging (DWI) is a sensitive tool for assessing the spread of metastatic bone malignancies. It offers voxel-wise calculation of apparent diffusion coefficient (ADC) which correlates with tissue cellularity, providing a potential imaging biomarker for tumour response assessment. However, DWI is an inherently noisy technique requiring many signal averages over multiple b-values, leading to times of up to 30 minutes for a whole-body exam. We present a novel neural network implicitly designed to provide high-quality images from heavily sub-sampled diffusion data (only 1 signal average) which allow whole-body acquisitions of ~5 minutes. We demonstrate that our network can achieve equivalent quality to the clinical b-value and ADC images in a radiological multi-reader study of 100 patients for whole-body and abdomen-pelvis data. We also achieved good agreement to the quantitative values of clinical images within multi-lesion segmentations in 16 patients compared to a previous approach.", "title":"Enhanced-quickDWI: Achieving equivalent clinical quality by denoising heavily sub-sampled diffusion-weighted imaging data", "authors":[ "Zormpas-Petridis, Konstantinos", "Candito, Antonio", "Messiou, Christina", "Koh, Dow-Mu", "Blackledge, Matthew D." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":67 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1711_paper.pdf", "bibtext":"@InProceedings{ Dai_RIPAV_MICCAI2024,\n author = { Dai, Wei and Yao, Yinghao and Kong, Hengte and Chen, Zhen Ji and Wang, Sheng and Bai, Qingshi and Sun, Haojun and Yang, Yongxin and Su, Jianzhong },\n title = { { RIP-AV: Joint Representative Instance Pre-training with Context Aware Network for Retinal Artery\/Vein Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate deep learning-based segmentation of retinal arteries and veins (A\/V) enables improved diagnosis, monitoring, and management of ocular fundus diseases and systemic diseases. However, existing resized and patch-based algorithms face challenges with redundancy, overlooking thin vessels, and underperforming in low-contrast edge areas of the retinal images, due to imbalanced background-to-A\/V ratios and limited contexts. Here, we have developed a novel deep learning framework for retinal A\/V segmentation, named RIP-AV, which integrates a Representative Instance Pre-training (RIP) task with a context-aware network for retinal A\/V segmentation for the first time. Initially, we develop a direct yet effective algorithm for vascular patch-pair selection (PPS) and then introduce a RIP task, formulated as a multi-label problem, aiming at enhancing the network\u2019s capability to learn latent arteriovenous features from diverse spatial locations across vascular patches. Subsequently, in the training phase, we introduce two novel modules: Patch Context Fusion (PCF) module and Distance Aware (DA) module. They are designed to improve the discriminability and continuity of thin vessels, especially in low-contrast edge areas, by leveraging the relationship between vascular patches and their surrounding contexts cooperatively and complementarily. The effectiveness of RIP-AV has been validated on three publicly available retinal datasets: AV-DRIVE, LES-AV, and HRF, demonstrating remarkable accuracies of 0.970, 0.967, and 0.981, respectively, thereby outperforming existing state-of-the-art methods. 
Notably, our method achieves a significant 1.7% improvement in accuracy on the HRF dataset, particularly enhancing the segmentation of thin edge arteries and veins.", "title":"RIP-AV: Joint Representative Instance Pre-training with Context Aware Network for Retinal Artery\/Vein Segmentation", "authors":[ "Dai, Wei", "Yao, Yinghao", "Kong, Hengte", "Chen, Zhen Ji", "Wang, Sheng", "Bai, Qingshi", "Sun, Haojun", "Yang, Yongxin", "Su, Jianzhong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/weidai00\/RIP-AV" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":68 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2802_paper.pdf", "bibtext":"@InProceedings{ Den_TAPoseNet_MICCAI2024,\n author = { Deng, Qingxin and Yang, Xunyu and Huang, Minghan and Jiang, Landu and Zhang, Dian },\n title = { { TAPoseNet: Teeth Alignment based on Pose estimation via multi-scale Graph Convolutional Network } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Teeth alignment plays an important role in orthodontic treatment. Automating the prediction of the teeth alignment target can significantly aid both doctors and patients. Traditional methods often utilize a rule-based approach or a deep learning method to generate the teeth alignment target. However, they usually require extra manual design by doctors, produce deformed teeth shapes, or even fail to address severe misalignment cases. To tackle the problem, we introduce a pose prediction model which can better describe the spatial representation of the tooth. We also consider geometric information to fully extract features of teeth. Meanwhile, we build a multi-scale Graph Convolutional Network (GCN) to characterize the teeth relationships at different levels (global, local, intersection). Finally, the target pose of each tooth can be predicted, and so the teeth movement from the initial pose to the target pose can be obtained without deforming teeth shapes. 
Our method has been validated in clinical orthodontic treatment cases and shows promising results both qualitatively and quantitatively.", "title":"TAPoseNet: Teeth Alignment based on Pose estimation via multi-scale Graph Convolutional Network", "authors":[ "Deng, Qingxin", "Yang, Xunyu", "Huang, Minghan", "Jiang, Landu", "Zhang, Dian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":69 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1357_paper.pdf", "bibtext":"@InProceedings{ Xie_DSNet_MICCAI2024,\n author = { Xie, Qihang and Zhang, Dan and Mou, Lei and Wang, Shanshan and Zhao, Yitian and Guo, Mengguo and Zhang, Jiong },\n title = { { DSNet: A Spatio-Temporal Consistency Network for Cerebrovascular Segmentation in Digital Subtraction Angiography Sequences } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Digital Subtraction Angiography (DSA) sequences serve as the foremost diagnostic standard for cerebrovascular diseases (CVDs). Accurate cerebrovascular segmentation in DSA sequences assists clinicians in analyzing pathological changes and pinpointing lesions. However, existing methods commonly utilize a single frame extracted from DSA sequences for cerebrovascular segmentation, disregarding the inherent temporal information within these sequences. This rich temporal information has the potential to achieve better segmentation coherence while reducing the interference caused by artifacts. Therefore, in this paper, we propose a spatio-temporal consistency network for cerebrovascular segmentation in DSA sequences, named DSNet, which fully exploits the information of DSA sequences. Specifically, our DSNet comprises a dual-branch encoder and a dual-branch decoder. The encoder consists of a temporal encoding branch (TEB) and a spatial encoding branch (SEB). The TEB is designed to capture dynamic vessel flow information, and the SEB is utilized to extract static vessel structure information. To effectively capture the correlations among sequential frames, a dynamic frame reweighting module is designed to adjust the weights of the frames. In the bottleneck, we exploit a spatio-temporal feature alignment (STFA) module to fuse the features from the encoder to achieve a more comprehensive vascular representation. Moreover, DSNet employs an unsupervised loss for consistency regularization between the dual outputs from the decoder during training. 
Experimental results demonstrate that DSNet outperforms existing methods, achieving a Dice score of 89.34\\% for cerebrovascular segmentation.", "title":"DSNet: A Spatio-Temporal Consistency Network for Cerebrovascular Segmentation in Digital Subtraction Angiography Sequences", "authors":[ "Xie, Qihang", "Zhang, Dan", "Mou, Lei", "Wang, Shanshan", "Zhao, Yitian", "Guo, Mengguo", "Zhang, Jiong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":70 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3136_paper.pdf", "bibtext":"@InProceedings{ Zho_Refining_MICCAI2024,\n author = { Zhou, Qian and Zou, Hua and Wang, Zhongyuan and Jiang, Haifeng and Wang, Yong },\n title = { { Refining Intraocular Lens Power Calculation: A Multi-modal Framework Using Cross-layer Attention and Effective Channel Attention } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Selecting the appropriate power for intraocular lenses (IOLs) is crucial for the success of cataract surgeries. Traditionally, ophthalmologists rely on manually designed formulas like \u201cBarrett\u201d and \u201cHoffer Q\u201d to calculate IOL power. However, these methods exhibit limited accuracy since they primarily focus on biometric data such as axial length and corneal curvature, overlooking the rich details in preoperative images that reveal the eye\u2019s internal anatomy. In this study, we propose a novel deep learning model that leverages multi-modal information for accurate IOL power calculation. In particular, to address the low information density in optical coherence tomography (OCT) images (i.e., most regions are with zero pixel values), we introduce a cross-layer attention module to take full advantage of hierarchical contextual information to extract comprehensive anatomical features. Additionally, the IOL powers given by traditional formulas are taken as prior knowledge to benefit model training. The proposed method is evaluated on a self-collected dataset consisting of 174 samples and compared with other approaches. The experimental results demonstrate that our approach significantly surpasses competing methods, achieving a mean absolute error of just 0.367 diopters (D). Impressively, the percentage of eyes with a prediction error within \u00b1 0.5 D achieves 84.1%. Furthermore, extensive ablation studies are conducted to validate each component\u2019s contribution and identify the biometric parameters most relevant to accurate IOL power calculation. 
Codes will be available at https:\/\/github.com\/liyiersan\/IOL.", "title":"Refining Intraocular Lens Power Calculation: A Multi-modal Framework Using Cross-layer Attention and Effective Channel Attention", "authors":[ "Zhou, Qian", "Zou, Hua", "Wang, Zhongyuan", "Jiang, Haifeng", "Wang, Yong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/liyiersan\/IOL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":71 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1438_paper.pdf", "bibtext":"@InProceedings{ Hua_Memoryefficient_MICCAI2024,\n author = { Huang, Kun and Ma, Xiao and Zhang, Yuhan and Su, Na and Yuan, Songtao and Liu, Yong and Chen, Qiang and Fu, Huazhu },\n title = { { Memory-efficient High-resolution OCT Volume Synthesis with Cascaded Amortized Latent Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Optical coherence tomography (OCT) image analysis plays an important role in the field of ophthalmology. Current successful analysis models rely on large available datasets, which can be challenging to obtain for certain tasks. The use of deep generative models to create realistic data emerges as a promising approach. However, due to limitations in hardware resources, it is still difficult to synthesize high-resolution OCT volumes. In this paper, we introduce a cascaded amortized latent diffusion model (CA-LDM) that can synthesize high-resolution OCT volumes in a memory-efficient way. First, we propose non-holistic autoencoders to efficiently build a bidirectional mapping between high-resolution volume space and low-resolution latent space. In tandem with the autoencoders, we propose cascaded diffusion processes to synthesize high-resolution OCT volumes with a global-to-local refinement process, amortizing the memory and computational demands. Experiments on a public high-resolution OCT dataset show that our synthetic data have realistic high-resolution and global features, surpassing the capabilities of existing methods. Moreover, performance gains on two downstream fine-grained segmentation tasks demonstrate the benefit of the proposed method in training deep learning models for medical imaging tasks. 
The code is publicly available.", "title":"Memory-efficient High-resolution OCT Volume Synthesis with Cascaded Amortized Latent Diffusion Models", "authors":[ "Huang, Kun", "Ma, Xiao", "Zhang, Yuhan", "Su, Na", "Yuan, Songtao", "Liu, Yong", "Chen, Qiang", "Fu, Huazhu" ], "id":"Conference", "arxiv_id":"2405.16516", "GitHub":[ "https:\/\/github.com\/nicetomeetu21\/CA-LDM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":72 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3476_paper.pdf", "bibtext":"@InProceedings{ Don_Cycleconsistent_MICCAI2024,\n author = { Dong, Xiuyu and Wu, Zhengwang and Ma, Laifa and Wang, Ya and Tang, Kaibo and Zhang, He and Lin, Weili and Li, Gang },\n title = { { Cycle-consistent Learning for Fetal Cortical Surface Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Fetal cortical surface reconstruction is crucial for quantitative analysis of normal and abnormal prenatal brain development. While there are many cortical surface reconstruction methods available for adults and infants, there remains a notable scarcity of dedicated techniques for fetal cortical surface reconstruction. Of note, fetal brain MR images present unique challenges, characterized by nonuniform low tissue contrast associated with extremely rapid brain development and folding during the prenatal stages and low imaging resolution, as well as susceptibility to severe motion artifacts. Moreover, the smaller size of fetal brains results in much narrower cortical ribbons and sulci. Consequently, the fetal cortical surfaces are more prone to be influenced by partial volume effects and tissue boundary ambiguities. In this work, we develop a multi-task, prior-knowledge-supervised fetal cortical surface reconstruction method based on deep learning. Our method incorporates a cycle-consistent strategy, utilizing prior knowledge and multiple stationary velocity fields to enhance its representation capabilities, enabling effective learning of diffeomorphic deformations from the template surface mesh to the inner and outer surfaces. Specifically, our framework involves iteratively refining both inner and outer surfaces in a cyclical manner by mutually guiding each other, thus improving accuracy especially for ambiguous and challenging cortical regions. Evaluation on a fetal MRI dataset with 83 subjects shows the superiority of our method with a geometric error of 0.229 \u00b1 0.047 mm and 0.023 \u00b1 0.058% self-intersecting faces, indicating promising surface geometric and topological accuracy. 
These results demonstrate a great advancement over state-of-the-art deep learning methods, while maintaining high computational efficiency.", "title":"Cycle-consistent Learning for Fetal Cortical Surface Reconstruction", "authors":[ "Dong, Xiuyu", "Wu, Zhengwang", "Ma, Laifa", "Wang, Ya", "Tang, Kaibo", "Zhang, He", "Lin, Weili", "Li, Gang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":73 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1786_paper.pdf", "bibtext":"@InProceedings{ Pan_Integrating_MICCAI2024,\n author = { Pang, Winnie and Ke, Xueyi and Tsutsui, Satoshi and Wen, Bihan },\n title = { { Integrating Clinical Knowledge into Concept Bottleneck Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Concept bottleneck models (CBMs), which predict human-interpretable concepts (e.g., nucleus shapes in cell images) before predicting the final output (e.g., cell type), provide insights into the decision-making processes of the model. However, training CBMs solely in a data-driven manner can introduce undesirable biases, which may compromise prediction performance, especially when the trained models are evaluated on out-of-domain images (e.g., those acquired using different devices). To mitigate this challenge, we propose integrating clinical knowledge to refine CBMs, better aligning them with clinicians\u2019 decision-making processes. Specifically, we guide the model to prioritize the concepts that clinicians also prioritize. We validate our approach on two datasets of medical images: white blood cell and skin images. 
Empirical validation demonstrates that incorporating medical guidance enhances the model\u2019s classification performance on unseen datasets with varying preparation methods, thereby increasing its real-world applicability.", "title":"Integrating Clinical Knowledge into Concept Bottleneck Models", "authors":[ "Pang, Winnie", "Ke, Xueyi", "Tsutsui, Satoshi", "Wen, Bihan" ], "id":"Conference", "arxiv_id":"2407.06600", "GitHub":[ "https:\/\/github.com\/PangWinnie0219\/align_concept_cbm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":74 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0967_paper.pdf", "bibtext":"@InProceedings{ Li_MPMNet_MICCAI2024,\n author = { Li, Yuanyuan and Hao, Huaying and Zhang, Dan and Fu, Huazhu and Liu, Mengting and Shan, Caifeng and Zhao, Yitian and Zhang, Jiong },\n title = { { MPMNet: Modal Prior Mutual-support Network for Age-related Macular Degeneration Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Early screening and classification of Age-related Macular Degeneration (AMD) are crucial for precise clinical treatment. Currently, most automated methods focus solely on dry and wet AMD classification. However, the classification of wet AMD into more explicit type 1 choroidal neovascularization (CNV) and type 2 CNV has rarely been explored, despite its significance in intravitreal injection. Furthermore, previous methods predominantly utilized single-modal images for distinguishing AMD types, while multi-modal images can provide a more comprehensive representation of pathological changes for accurate diagnosis. \nIn this paper, we propose a Modal Prior Mutual-support Network (MPMNet), which for the first time combines OCTA images and OCT sequences for the classification of normal, dry AMD, type 1 CNV, and type 2 CNV. Specifically, we first employ a multi-branch encoder to extract modality-Specific features. \nA novel modal prior mutual-support mechanism is proposed, which determines the primary and auxiliary modalities based on the sensitivity of different modalities to lesions and makes joint decisions. In this mechanism, a distillation loss is employed to enforce the consistency between single-modal decisions and joint decisions. It can facilitate networks to focus on specific pathological information within individual modalities. 
\nFurthermore, we propose a mutual information-guided feature dynamic adjustment strategy.\nThis strategy adjusts the channel weights of the two modalities by computing the mutual information between OCTA and OCT, thereby mitigating the influence of low-quality modal features on the network\u2019s robustness.\nExperiments on private and public datasets have demonstrated that the proposed MPMNet outperforms existing state-of-the-art methods.", "title":"MPMNet: Modal Prior Mutual-support Network for Age-related Macular Degeneration Classification", "authors":[ "Li, Yuanyuan", "Hao, Huaying", "Zhang, Dan", "Fu, Huazhu", "Liu, Mengting", "Shan, Caifeng", "Zhao, Yitian", "Zhang, Jiong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":75 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3884_paper.pdf", "bibtext":"@InProceedings{ Tan_Fetal_MICCAI2024,\n author = { Tan, Junpeng and Zhang, Xin and Qing, Chunmei and Yang, Chaoxiang and Zhang, He and Li, Gang and Xu, Xiangmin },\n title = { { Fetal MRI Reconstruction by Global Diffusion and Consistent Implicit Representation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Although the utilization of multi-stacks can solve fetal MRI motion correction and artifact removal problems, there are still problems of regional intensity heterogeneity, and global consistency discrimination in 3D space. To this end, we propose a novel coarse-to-fine self-supervised fetal brain MRI Radiation Diffusion Generation Model (RDGM). Firstly, we propose a novel self-supervised regionally Consistent Implicit Neural Representation (CINR) network with a double-spatial voxel association consistency mechanism to solve regional intensity heterogeneity. CINR enhances regional 3D voxel association and complementarity by two-voxel mapping spaces to generate coarse MRI. We also fine-tune the weighted slice reconstruction loss to improve the network reconstruction performance. Moreover, we propose the Global Diffusion Discriminative Generation (GDDG) fine module to enhance volume global consistency and discrimination. The noise diffusion is used to transform the global intensity discriminant information in 3D volume. 
The experiments on two real-world fetal MRI datasets demonstrate that RDGM achieves state-of-the-art results.", "title":"Fetal MRI Reconstruction by Global Diffusion and Consistent Implicit Representation", "authors":[ "Tan, Junpeng", "Zhang, Xin", "Qing, Chunmei", "Yang, Chaoxiang", "Zhang, He", "Li, Gang", "Xu, Xiangmin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":76 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1674_paper.pdf", "bibtext":"@InProceedings{ Zen_ABP_MICCAI2024,\n author = { Zeng, Xinyi and Zeng, Pinxian and Cui, Jiaqi and Li, Aibing and Liu, Bo and Wang, Chengdi and Wang, Yan },\n title = { { ABP: Asymmetric Bilateral Prompting for Text-guided Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning-based segmentation models have made remarkable progress in aiding pulmonary disease diagnosis by segmenting lung lesion areas in large amounts of annotated X-ray images. Recently, to alleviate the demand for medical image data and further improve segmentation performance, various studies have extended mono-modal models to incorporate additional modalities, such as diagnostic textual notes. Despite the prevalent utilization of cross-attention mechanisms or their variants to model interactions between visual and textual features, current text-guided medical image segmentation approaches still face limitations. These include a lack of adaptive adjustments for text tokens to accommodate variations in image contexts, as well as a deficiency in exploring and utilizing text-prior information. To mitigate these limitations, we propose Asymmetric Bilateral Prompting (ABP), a novel method tailored for text-guided medical image segmentation. Specifically, we introduce an ABP block preceding each up-sample stage in the image decoder. This block first integrates a symmetric bilateral cross-attention module for both textual and visual branches to model preliminary multi-modal interactions. Then, guided by the opposite modality, two asymmetric operations are employed for further modality-specific refinement. Notably, we utilize attention scores from the image branch as attentiveness rankings to prune and remove redundant text tokens, ensuring that the image features are progressively interacted with more attentive text tokens during up-sampling. Asymmetrically, we integrate attention scores from the text branch as text-prior information to enhance visual representations and target predictions in the visual branch. 
Experimental results on the QaTa-COV19 dataset validate the superiority of our proposed method.", "title":"ABP: Asymmetric Bilateral Prompting for Text-guided Medical Image Segmentation", "authors":[ "Zeng, Xinyi", "Zeng, Pinxian", "Cui, Jiaqi", "Li, Aibing", "Liu, Bo", "Wang, Chengdi", "Wang, Yan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":77 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1289_paper.pdf", "bibtext":"@InProceedings{ Li_An_MICCAI2024,\n author = { Li, Qing and Zhang, Yizhe and Li, Yan and Lyu, Jun and Liu, Meng and Sun, Longyu and Sun, Mengting and Li, Qirong and Mao, Wenyue and Wu, Xinran and Zhang, Yajing and Chu, Yinghua and Wang, Shuo and Wang, Chengyan },\n title = { { An Empirical Study on the Fairness of Foundation Models for Multi-Organ Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"The segmentation foundation model, e.g., Segment Anything Model (SAM), has attracted increasing interest in the medical image community. Early pioneering studies primarily concentrated on assessing and improving SAM\u2019s performance from the perspectives of overall accuracy and efficiency, yet little attention was given to the fairness considerations. This oversight raises questions about the potential for performance biases that could mirror those found in task-specific deep learning models like nnU-Net. In this paper, we explored the fairness dilemma concerning large segmentation foundation models. We prospectively curate a benchmark dataset of 3D MRI and CT scans of the organs including liver, kidney, spleen, lung and aorta from a total of 1054 healthy subjects with expert segmentations. Crucially, we document demographic details such as gender, age, and body mass index (BMI) for each subject to facilitate a nuanced fairness analysis. We test state-of-the-art foundation models for medical image segmentation, including the original SAM, medical SAM and SAT models, to evaluate segmentation efficacy across different demographic groups and identify disparities. Our comprehensive analysis, which accounts for various confounding factors, reveals significant fairness concerns within these foundational models. 
Moreover, our findings highlight not only disparities in overall segmentation metrics, such as the Dice Similarity Coefficient but also significant variations in the spatial distribution of segmentation errors, offering empirical evidence of the nuanced challenges in ensuring fairness in medical image segmentation.", "title":"An Empirical Study on the Fairness of Foundation Models for Multi-Organ Image Segmentation", "authors":[ "Li, Qing", "Zhang, Yizhe", "Li, Yan", "Lyu, Jun", "Liu, Meng", "Sun, Longyu", "Sun, Mengting", "Li, Qirong", "Mao, Wenyue", "Wu, Xinran", "Zhang, Yajing", "Chu, Yinghua", "Wang, Shuo", "Wang, Chengyan" ], "id":"Conference", "arxiv_id":"2406.12646", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":78 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0115_paper.pdf", "bibtext":"@InProceedings{ Tia_TaGAT_MICCAI2024,\n author = { Tian, Xin and Anantrasirichai, Nantheera and Nicholson, Lindsay and Achim, Alin },\n title = { { TaGAT: Topology-Aware Graph Attention Network For Multi-modal Retinal Image Fusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the realm of medical image fusion, integrating information from various modalities is crucial for improving diagnostics and treatment planning, especially in retinal health, where the important features exhibit differently in different imaging modalities. Existing deep learning-based approaches insufficiently focus on retinal image fusion, and thus fail to preserve enough anatomical structure and fine vessel details in retinal image fusion. To address this, we propose the Topology-Aware Graph Attention Network (TaGAT) for multi-modal retinal image fusion, leveraging a novel Topology-Aware Encoder (TAE) with Graph Attention Networks (GAT) to effectively enhance spatial features with retinal vasculature\u2019s graph topology across modalities. The TAE encodes the base and detail features, extracted via a Long-short Range (LSR) encoder from retinal images, into the graph extracted from the retinal vessel. Within the TAE, the GAT-based Graph Information Update block dynamically refines and aggregates the node features to generate topology-aware graph features. The updated graph features with base and detail features are combined and decoded as a fused image. Our model outperforms state-of-the-art methods in Fluorescein Fundus Angiography (FFA) with Color Fundus (CF) and Optical Coherence Tomography (OCT) with confocal microscopy retinal image fusion. 
The source code can be accessed via https:\/\/github.com\/xintian-99\/TaGAT.", "title":"TaGAT: Topology-Aware Graph Attention Network For Multi-modal Retinal Image Fusion", "authors":[ "Tian, Xin", "Anantrasirichai, Nantheera", "Nicholson, Lindsay", "Achim, Alin" ], "id":"Conference", "arxiv_id":"2407.14188", "GitHub":[ "https:\/\/github.com\/xintian-99\/TaGAT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":79 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0674_paper.pdf", "bibtext":"@InProceedings{ Liu_Cut_MICCAI2024,\n author = { Liu, Chang and Fan, Fuxin and Schwarz, Annette and Maier, Andreas },\n title = { { Cut to the Mix: Simple Data Augmentation Outperforms Elaborate Ones in Limited Organ Segmentation Datasets } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multi-organ segmentation is a widely applied clinical routine and automated organ segmentation tools dramatically improve the pipeline of the radiologists. Recently, deep learning (DL) based segmentation models have shown the capacity to accomplish such a task. However, the training of the segmentation networks requires large amount of data with manual annotations, which is a major concern due to the data scarcity from clinic. Working with limited data is still common for researches on novel imaging modalities. To enhance the effectiveness of DL models trained with limited data, data augmentation (DA) is a crucial regularization technique. Traditional DA (TDA) strategies focus on basic intra-image operations, i.e. generating images with different orientations and intensity distributions. In contrast, the inter-image and object-level DA operations are able to create new images from separate individuals. However, such DA strategies are not well explored on the task of multi-organ segmentation. In this paper, we investigated four possible inter-image DA strategies: CutMix, CarveMix, ObjectAug and AnatoMix, on two organ segmentation datasets. The result shows that CutMix, CarveMix and AnatoMix can improve the average dice score by 4.9, 2.0 and 1.9, compared with the state-of-the-art nnUNet without DA strategies. These results can be further improved by adding TDA strategies. It is revealed in our experiments that CutMix is a robust but simple DA strategy to drive up the segmentation performance for multi-organ segmentation, even when CutMix produces intuitively \u2018wrong\u2019 images. 
We present our implementation as a DA toolkit for multi-organ segmentation on GitHub for future benchmarks.", "title":"Cut to the Mix: Simple Data Augmentation Outperforms Elaborate Ones in Limited Organ Segmentation Datasets", "authors":[ "Liu, Chang", "Fan, Fuxin", "Schwarz, Annette", "Maier, Andreas" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Rebooorn\/mosDAtoolkit" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":80 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0251_paper.pdf", "bibtext":"@InProceedings{ Oh_Controllable_MICCAI2024,\n author = { Oh, Hyun-Jic and Jeong, Won-Ki },\n title = { { Controllable and Efficient Multi-Class Pathology Nuclei Data Augmentation using Text-Conditioned Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the field of computational pathology, deep learning algorithms have made significant progress in tasks such as nuclei segmentation and classification. However, the potential of these advanced methods is limited by the lack of available labeled data. Although image synthesis via recent generative models has been actively explored to address this challenge, existing works have barely addressed label augmentation and are mostly limited to single-class and unconditional label generation. In this paper, we introduce a novel two-stage framework for multi-class nuclei data augmentation using text-conditional diffusion models. In the first stage, we innovate nuclei label synthesis by generating multi-class semantic labels and corresponding instance maps through a joint diffusion model conditioned by text prompts that specify the label structure information. In the second stage, we utilize a semantic and text-conditional latent diffusion model to efficiently generate high-quality pathology images that align with the generated nuclei label images. 
We demonstrate the effectiveness of our method on large and diverse pathology nuclei datasets, with evaluations including qualitative and quantitative analyses, as well as assessments of downstream tasks.", "title":"Controllable and Efficient Multi-Class Pathology Nuclei Data Augmentation using Text-Conditioned Diffusion Models", "authors":[ "Oh, Hyun-Jic", "Jeong, Won-Ki" ], "id":"Conference", "arxiv_id":"2407.14426", "GitHub":[ "https:\/\/github.com\/hvcl\/ConNucDA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":81 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2226_paper.pdf", "bibtext":"@InProceedings{ Bar_MARVEL_MICCAI2024,\n author = { Barrier, Antoine and Coudert, Thomas and Delphin, Aur\u00e9lien and Lemasson, Benjamin and Christen, Thomas },\n title = { { MARVEL: MR Fingerprinting with Additional micRoVascular Estimates using bidirectional LSTMs } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The Magnetic Resonance Fingerprinting (MRF) approach aims to estimate multiple MR or physiological parameters simultaneously with a single fast acquisition sequence. Most of the MRF studies proposed so far have used simple MR sequence types to measure relaxation times (T1, T2). In that case, deep learning algorithms have been successfully used to speed up the reconstruction process. In theory, the MRF concept could be used with a variety of other MR sequence types and should be able to provide more information about the tissue microstructures. Yet, increasing the complexity of the numerical models often leads to prohibitive simulation times, and estimating multiple parameters from one sequence implies new dictionary dimensions whose sizes become too large for standard computers and DL architectures.\nIn this paper, we propose to analyze the MRF signal coming from a complex balanced Steady-State Free Precession (bSSFP) type sequence to simultaneously estimate relaxometry maps (T1, T2), field maps (B1, B0) as well as microvascular properties such as the local Cerebral Blood Volume (CBV) or the averaged vessel Radius (R).\nTo bypass the curse of dimensionality, we propose an efficient way to simulate the MR signal coming from numerical voxels containing realistic microvascular networks as well as a Bidirectional Long Short-Term Memory network that replaces the matching process.\nOn top of standard MRF maps, our results on 3 human volunteers suggest that our approach can quickly produce high-quality quantitative maps of microvascular parameters that are otherwise obtained using longer dedicated sequences and intravenous injection of a contrast agent. 
This approach could be used for the management of multiple pathologies and could be tuned to provide other types of microstructural information.", "title":"MARVEL: MR Fingerprinting with Additional micRoVascular Estimates using bidirectional LSTMs", "authors":[ "Barrier, Antoine", "Coudert, Thomas", "Delphin, Aur\u00e9lien", "Lemasson, Benjamin", "Christen, Thomas" ], "id":"Conference", "arxiv_id":"2407.10512", "GitHub":[ "https:\/\/github.com\/nifm-gin\/MARVEL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":82 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2135_paper.pdf", "bibtext":"@InProceedings{ Xu_LBUNet_MICCAI2024,\n author = { Xu, Jiahao and Tong, Lyuyang },\n title = { { LB-UNet: A Lightweight Boundary-assisted UNet for Skin Lesion Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Skin lesion segmentation is vital in computer-aided diagnosis and treatment of skin diseases. UNet and its variants have been widely utilized for skin lesion segmentation. However, resource constraints limit the deployment of larger parameter models on edge devices. To address this issue, we propose a novel lightweight boundary-assisted UNet (LB-UNet) for skin lesion segmentation. LB-UNet incorporates the Group Shuffle Attention module (GSA) to significantly reduce the model\u2019s parameters and computational demands. Furthermore, to enhance the model\u2019s segmentation capability, especially in handling ambiguous boundaries, LB-UNet introduces the Prediction Map Auxiliary module (PMA). Briefly, PMA consists of three modules: (1) the Segmentation Region and Boundary Prediction module is utilized to predict the segmentation region and boundary of the decoder features; (2) the GA-Based Boundary Generator is employed to generate the ground truth boundary map through a genetic algorithm; (3) the Prediction Information Fusion module enhances the skip connection by leveraging the prediction information. By combining these modules, the region and boundary information is effectively integrated into the backbone. The experimental results on the ISIC2017 and ISIC2018 datasets demonstrate that LB-UNet outperforms current lightweight methods. To the best of our knowledge, LB-UNet is the first model with a parameter count limited to 38KB and Giga-Operations Per Second (GFLOPs) limited to 0.1. 
The codes and trained models are publicly available at https:\/\/github.com\/xuxuxuxuxuxjh\/LB-UNet.", "title":"LB-UNet: A Lightweight Boundary-assisted UNet for Skin Lesion Segmentation", "authors":[ "Xu, Jiahao", "Tong, Lyuyang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xuxuxuxuxuxjh\/LB-UNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":83 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3991_paper.pdf", "bibtext":"@InProceedings{ Zho_PathM3_MICCAI2024,\n author = { Zhou, Qifeng and Zhong, Wenliang and Guo, Yuzhi and Xiao, Michael and Ma, Hehuan and Huang, Junzhou },\n title = { { PathM3: A Multimodal Multi-Task Multiple Instance Learning Framework for Whole Slide Image Classification and Captioning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the field of computational histopathology, both whole slide images (WSIs) and diagnostic captions provide valuable insights for making diagnostic decisions. However, aligning WSIs with diagnostic captions presents a significant challenge. This difficulty arises from two main factors: 1) Gigapixel WSIs are unsuitable for direct input into deep learning models, and the redundancy and correlation among the patches demand more attention; and 2) Authentic WSI diagnostic captions are extremely limited, making it difficult to train an effective model. To overcome these obstacles, we present PathM3, a multimodal, multi-task, multiple instance learning (MIL) framework for WSI classification and captioning. PathM3 adapts a query-based transformer to effectively align WSIs with diagnostic captions. Given that histopathology visual patterns are redundantly distributed across WSIs, we aggregate each patch feature with MIL method that considers the correlations among instances. Furthermore, our PathM3 overcomes data scarcity in WSI-level captions by leveraging limited WSI diagnostic caption data in the manner of multi-task joint learning. 
Extensive experiments with improved classification accuracy and caption generation demonstrate the effectiveness of our method on both WSI classification and captioning task.", "title":"PathM3: A Multimodal Multi-Task Multiple Instance Learning Framework for Whole Slide Image Classification and Captioning", "authors":[ "Zhou, Qifeng", "Zhong, Wenliang", "Guo, Yuzhi", "Xiao, Michael", "Ma, Hehuan", "Huang, Junzhou" ], "id":"Conference", "arxiv_id":"2403.08967", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":84 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2709_paper.pdf", "bibtext":"@InProceedings{ Wan_fTSPL_MICCAI2024,\n author = { Wang, Pengyu and Zhang, Huaqi and He, Zhibin and Peng, Zhihao and Yuan, Yixuan },\n title = { { fTSPL: Enhancing Brain Analysis with fMRI-Text Synergistic Prompt Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Using functional Magnetic Resonance Imaging (fMRI) to construct the functional connectivity is a well-established paradigm for deep learning-based brain analysis. Recently, benefiting from the remarkable effectiveness and generalization brought by large-scale multi-modal pre-training data, Vision-Language (V-L) models have achieved excellent performance in numerous medical tasks. However, applying the pre-trained V-L model to brain analysis presents two significant challenges: (1) The lack of paired fMRI-text data; (2) The construction of functional connectivity from multi-modal data. To tackle these challenges, we propose a fMRI-Text Synergistic Prompt Learning (fTSPL) pipeline, which utilizes the pre-trained V-L model to enhance brain analysis for the first time. In fTSPL, we first propose an Activation-driven Brain-region Text Generation (ABTG) scheme that can automatically generate instance-level texts describing each fMRI, and then leverage the V-L model to learn multi-modal fMRI and text representations. We also propose a Prompt-boosted Multi-modal Functional Connectivity Construction (PMFCC) scheme by establishing the correlations between fMRI-text representations and brain-region embeddings. This scheme serves as a plug-and-play preliminary that can connect with various Graph Neural Networks (GNNs) for brain analysis. Experiments on ABIDE and HCP datasets demonstrate that our pipeline outperforms state-of-the-art methods on brain classification and prediction tasks. 
The code is available at https:\/\/github.com\/CUHK-AIM-Group\/fTSPL.", "title":"fTSPL: Enhancing Brain Analysis with fMRI-Text Synergistic Prompt Learning", "authors":[ "Wang, Pengyu", "Zhang, Huaqi", "He, Zhibin", "Peng, Zhihao", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/fTSPL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":85 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1307_paper.pdf", "bibtext":"@InProceedings{ Wan_Prior_MICCAI2024,\n author = { Wang, Qingbin and Wong, Wai Chon and Yin, Mi and Ma, Yutao },\n title = { { Prior Activation Map Guided Cervical OCT Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cervical cancer poses a severe threat to women\u2019s health globally. As a non-invasive imaging modality, cervical optical coherence tomography (OCT) rapidly generates micrometer-resolution images from the cervix, comparable nearly to histopathology. However, the scarcity of high-quality labeled OCT images and the inevitable speckle noise impede deep-learning models from extracting discriminative features of high-risk lesion images. This study utilizes segmentation masks and bounding boxes to construct prior activation maps (PAMs) that encode pathologists\u2019 diagnostic insights into different cervical disease categories in OCT images. These PAMs guide the classification model in producing reasonable class activation maps during training, enhancing interpretability and performance to meet gynecologists\u2019 needs. Experiments using five-fold cross-validation demonstrate that the PAM-guided classification model boosts the classification of high-risk lesions on three datasets. 
Besides, our method enhances histopathology-based interpretability to assist gynecologists in analyzing cervical OCT images efficiently, advancing the integration of deep learning in clinical practice.", "title":"Prior Activation Map Guided Cervical OCT Image Classification", "authors":[ "Wang, Qingbin", "Wong, Wai Chon", "Yin, Mi", "Ma, Yutao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ssea-lab\/AMGuided_Cervical_OCT_Classification" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":86 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0919_paper.pdf", "bibtext":"@InProceedings{ Tan_Interpretable_MICCAI2024,\n author = { Tang, Haoteng and Liu, Guodong and Dai, Siyuan and Ye, Kai and Zhao, Kun and Wang, Wenlu and Yang, Carl and He, Lifang and Leow, Alex and Thompson, Paul and Huang, Heng and Zhan, Liang },\n title = { { Interpretable Spatio-Temporal Embedding for Brain Structural-Effective Network with Ordinary Differential Equation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The MRI-derived brain network serves as a pivotal instrument in elucidating both the structural and functional aspects of the brain, encompassing the ramifications of diseases and developmental processes. However, prevailing methodologies, often focusing on synchronous BOLD signals from functional MRI (fMRI), may not capture directional influences among brain regions and rarely tackle temporal functional dynamics. \nIn this study, we first construct the brain-effective network via the dynamic causal model. Subsequently, we introduce an interpretable graph learning framework termed Spatio-Temporal Embedding ODE (STE-ODE). This framework incorporates specifically designed directed node embedding layers, aiming at capturing the dynamic interplay between structural and effective networks via an ordinary differential equation (ODE) model, which characterizes spatial-temporal brain dynamics. Our framework is validated on several clinical phenotype prediction tasks using two independent publicly available datasets (HCP and OASIS). The experimental results clearly demonstrate the advantages of our model compared to several state-of-the-art methods.", "title":"Interpretable Spatio-Temporal Embedding for Brain Structural-Effective Network with Ordinary Differential Equation", "authors":[ "Tang, Haoteng", "Liu, Guodong", "Dai, Siyuan", "Ye, Kai", "Zhao, Kun", "Wang, Wenlu", "Yang, Carl", "He, Lifang", "Leow, Alex", "Thompson, Paul", "Huang, Heng", "Zhan, Liang" ], "id":"Conference", "arxiv_id":"2405.13190", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":87 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3584_paper.pdf", "bibtext":"@InProceedings{ Koc_DinoBloom_MICCAI2024,\n author = { Koch, Valentin and Wagner, Sophia J. and Kazeminia, Salome and Sancar, Ece and Hehr, Matthias and Schnabel, Julia A. 
and Peng, Tingying and Marr, Carsten },\n title = { { DinoBloom: A Foundation Model for Generalizable Cell Embeddings in Hematology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"In hematology, computational models offer significant potential to improve diagnostic accuracy, streamline workflows, and reduce the tedious work of analyzing single cells in peripheral blood or bone marrow smears. However, clinical adoption of computational models has been hampered by the lack of generalization due to large batch effects, small dataset sizes, and poor performance in transfer learning from natural images. To address these challenges, we introduce DinoBloom, the first foundation model for single cell images in hematology, utilizing a tailored DINOv2 pipeline. Our model is built upon an extensive collection of 13 diverse, publicly available datasets of peripheral blood and bone marrow smears, the most substantial open-source cohort in hematology so far, comprising over 380,000 white blood cell images.\nTo assess its generalization capability, we evaluate it on an external dataset with a challenging domain shift. We show that our model outperforms existing medical and non-medical vision models in (i) linear probing and k-nearest neighbor evaluations on blood and bone marrow smears and (ii) weakly supervised multiple instance learning for acute myeloid leukemia subtyping by a large margin. \nA family of four DinoBloom models (small, base, large, and giant) can be adapted for a wide range of downstream applications, be a strong baseline for classification problems, and facilitate the assessment of batch effects in new datasets. All models are available at github.com\/marrlab\/DinoBloom.", "title":"DinoBloom: A Foundation Model for Generalizable Cell Embeddings in Hematology", "authors":[ "Koch, Valentin", "Wagner, Sophia J.", "Kazeminia, Salome", "Sancar, Ece", "Hehr, Matthias", "Schnabel, Julia A.", "Peng, Tingying", "Marr, Carsten" ], "id":"Conference", "arxiv_id":"2404.05022", "GitHub":[ "github.com\/marrlab\/DinoBloom" ], "paper_page":"https:\/\/huggingface.co\/papers\/2404.05022", "n_linked_authors":0, "upvotes":1, "num_comments":0, "n_authors":8, "Models":[ "1aurent\/vit_base_patch14_224.dinobloom", "1aurent\/vit_small_patch14_224.dinobloom", "1aurent\/vit_large_patch14_224.dinobloom", "1aurent\/vit_giant_patch14_224.dinobloom" ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ "1aurent\/vit_base_patch14_224.dinobloom", "1aurent\/vit_small_patch14_224.dinobloom", "1aurent\/vit_large_patch14_224.dinobloom", "1aurent\/vit_giant_patch14_224.dinobloom" ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":88 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3604_paper.pdf", "bibtext":"@InProceedings{ Mob_Harnessing_MICCAI2024,\n author = { Mobadersany, Pooya and Parmar, Chaitanya and Damasceno, Pablo F. and Fadnavis, Shreyas and Chaitanya, Krishna and Li, Shilong and Schwab, Evan and Xiao, Jaclyn and Surace, Lindsey and Mansi, Tommaso and Cula, Gabriela Oana and Ghanem, Louis R. 
and Standish, Kristopher },\n title = { { Harnessing Temporal Information for Precise Frame-Level Predictions in Endoscopy Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Camera localization in endoscopy videos plays a fundamental role in enabling precise diagnosis and effective treatment planning for patients with Inflammatory Bowel Disease (IBD). Precise frame-level classification, however, depends on long-range temporal dynamics, ranging from hundreds to tens of thousands of frames per video, challenging current neural network approaches. To address this, we propose EndoFormer, a frame-level classification model that leverages long-range temporal information for anatomic segment classification in gastrointestinal endoscopy videos. EndoFormer combines a Foundation Model block, judicious video-level augmentations, and a Transformer classifier for frame-level classification while maintaining a small memory footprint. Experiments on 4160 endoscopy videos from four clinical trials and over 61 million frames demonstrate that EndoFormer achieves an AUC of 0.929, significantly improving on state-of-the-art models for anatomic segment classification. These results highlight the potential for adopting EndoFormer in endoscopy video analysis applications that require long-range temporal dynamics for precise frame-level predictions.", "title":"Harnessing Temporal Information for Precise Frame-Level Predictions in Endoscopy Videos", "authors":[ "Mobadersany, Pooya", "Parmar, Chaitanya", "Damasceno, Pablo F.", "Fadnavis, Shreyas", "Chaitanya, Krishna", "Li, Shilong", "Schwab, Evan", "Xiao, Jaclyn", "Surace, Lindsey", "Mansi, Tommaso", "Cula, Gabriela Oana", "Ghanem, Louis R.", "Standish, Kristopher" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":89 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1958_paper.pdf", "bibtext":"@InProceedings{ Zha_IPLC_MICCAI2024,\n author = { Zhang, Guoning and Qi, Xiaoran and Yan, Bo and Wang, Guotai },\n title = { { IPLC: Iterative Pseudo Label Correction Guided by SAM for Source-Free Domain Adaptation in Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Source-Free Domain Adaptation (SFDA) is important for dealing with domain shift without access to source data and labels of target domain images for medical image segmentation. However, existing SFDA methods have limited performance due to insufficient supervision and unreliable pseudo labels. To address this issue, we propose a novel Iterative Pseudo Label Correction (IPLC) SFDA framework for medical image segmentation, guided by the Segment Anything Model (SAM). Specifically, with a pre-trained source model and SAM, we propose multiple random sampling and entropy estimation to obtain robust pseudo labels and mitigate the noise.
We introduce mean negative curvature minimization to provide more sufficient constraints and achieve smoother segmentation. We also propose an Iterative Correction Learning (ICL) strategy to iteratively generate reliable pseudo labels with updated prompts for domain adaptation. Experiments on a public multi-site heart MRI segmentation dataset (M&MS) demonstrate that our method effectively improved the quality of pseudo labels and outperformed several state-of-the-art SFDA methods. The code is available at https:\/\/github.com\/HiLab-git\/IPLC.", "title":"IPLC: Iterative Pseudo Label Correction Guided by SAM for Source-Free Domain Adaptation in Medical Image Segmentation", "authors":[ "Zhang, Guoning", "Qi, Xiaoran", "Yan, Bo", "Wang, Guotai" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/HiLab-git\/IPLC" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":90 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0240_paper.pdf", "bibtext":"@InProceedings{ Xu_StereoDiffusion_MICCAI2024,\n author = { Xu, Haozheng and Xu, Chi and Giannarou, Stamatia },\n title = { { StereoDiffusion: Temporally Consistent Stereo Depth Estimation with Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"In Minimally Invasive Surgery (MIS), temporally consistent depth estimation is necessary for accurate intraoperative surgical navigation and robotic control. Despite the plethora of stereo depth estimation methods, estimating temporally consistent disparity is still challenging due to scene and camera dynamics. The aim of this paper is to introduce the StereoDiffusion framework for temporally consistent disparity estimation. For the first time, a latent diffusion model is incorporated into stereo depth estimation. Advancing existing depth estimation methods based on diffusion models, StereoDiffusion uses prior knowledge to refine disparity. Prior knowledge is generated using optical flow to warp the disparity map of the previous frame and predict a reprojected disparity map in the current frame to be refined. For efficient inference, fewer denoising steps and an efficient denoising scheduler have been used. 
Extensive validation on MIS stereo datasets and comparison to state-of-the-art (SOTA) methods show that StereoDiffusion achieves best performance and provides temporally consistent disparity estimation with high-fidelity details, despite having been trained on natural scenes only.", "title":"StereoDiffusion: Temporally Consistent Stereo Depth Estimation with Diffusion Models", "authors":[ "Xu, Haozheng", "Xu, Chi", "Giannarou, Stamatia" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xuhaozheng\/StereoDiff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":91 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1369_paper.pdf", "bibtext":"@InProceedings{ Boy_MEGFormer_MICCAI2024,\n author = { Boyko, Maria and Druzhinina, Polina and Kormakov, Georgii and Beliaeva, Aleksandra and Sharaev, Maxim },\n title = { { MEGFormer: enhancing speech decoding from brain activity through extended semantic representations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Even though multiple studies have examined the decoding of speech from brain activity through non-invasive technologies in recent years, the task still presents a challenge as decoding quality is still insufficient for practical applications. An effective solution could help in the advancement of brain-computer interfaces (BCIs), potentially enabling communication restoration for individuals experiencing speech impairments. At the same time, these studies can provide fundamental insights into how the brain processes speech and sound.\nOne of the approaches for decoding perceived speech involves using a self-supervised model that has been trained using contrastive learning. This model matches segments of the same length from magnetoencephalography (MEG) to audio in a zero-shot way. We improve the method for decoding perceived speech by incorporating a new architecture based on CNN Transformer. As a result of proposed modifications, the accuracy of perceived speech decoding increases significantly from current 69\\% to 83\\% and from 67\\% to 70\\% on publicly available datasets. 
Notably, the greatest improvement in accuracy is observed in longer speech fragments that carry semantic meaning, rather than in shorter fragments with sounds and phonemes.\nOur code is available at https:\/\/github.com\/maryjis\/MEGformer", "title":"MEGFormer: enhancing speech decoding from brain activity through extended semantic representations", "authors":[ "Boyko, Maria", "Druzhinina, Polina", "Kormakov, Georgii", "Beliaeva, Aleksandra", "Sharaev, Maxim" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/maryjis\/MEGformer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":92 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1856_paper.pdf", "bibtext":"@InProceedings{ Wei_Enhanced_MICCAI2024,\n author = { Wei, Ruofeng and Li, Bin and Chen, Kai and Ma, Yiyao and Liu, Yunhui and Dou, Qi },\n title = { { Enhanced Scale-aware Depth Estimation for Monocular Endoscopic Scenes with Geometric Modeling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Scale-aware monocular depth estimation poses a significant challenge in computer-aided endoscopic navigation. However, existing depth estimation methods that do not consider the geometric priors struggle to learn the absolute scale from training with monocular endoscopic sequences. Additionally, conventional methods face difficulties in accurately estimating details on tissue and instruments boundaries. In this paper, we tackle these problems by proposing a novel enhanced scale-aware framework that only uses monocular images with geometric modeling for depth estimation. Specifically, we first propose a multi-resolution depth fusion strategy to enhance the quality of monocular depth estimation. To recover the precise scale between relative depth and real-world values, we further calculate the 3D poses of instruments in the endoscopic scenes by algebraic geometry based on the image-only geometric primitives (i.e., boundaries and tip of instruments). Afterwards, the 3D poses of surgical instruments enable the scale recovery of relative depth maps. By coupling scale factors and relative depth estimation, the scale aware depth of the monocular endoscopic scenes can be estimated. We evaluate the pipeline on in-house endoscopic surgery videos and simulated data. The results demonstrate that our method can learn the absolute scale with geometric modeling and accurately estimate scale-aware depth for monocular scenes. 
Code is available at: https:\/\/github.com\/med-air\/MonoEndoDepth", "title":"Enhanced Scale-aware Depth Estimation for Monocular Endoscopic Scenes with Geometric Modeling", "authors":[ "Wei, Ruofeng", "Li, Bin", "Chen, Kai", "Ma, Yiyao", "Liu, Yunhui", "Dou, Qi" ], "id":"Conference", "arxiv_id":"2408.07266", "GitHub":[ "https:\/\/github.com\/med-air\/MonoEndoDepth" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":93 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1442_paper.pdf", "bibtext":"@InProceedings{ Hua_Optimizing_MICCAI2024,\n author = { Huang, Yifei and Shen, Chuyun and Li, Wenhao and Wang, Xiangfeng and Jin, Bo and Cai, Haibin },\n title = { { Optimizing Efficiency and Effectiveness in Sequential Prompt Strategy for SAM using Reinforcement Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the rapidly advancing field of medical image analysis, Interactive Medical Image Segmentation (IMIS) plays a crucial role in augmenting diagnostic precision. \nWithin the realm of IMIS, the Segment Anything Model (SAM), trained on natural images, demonstrates zero-shot capabilities when applied to medical images as the foundation model.\nNevertheless, SAM has been observed to display considerable sensitivity to variations in interaction forms within interactive sequences, introducing substantial uncertainty into the interaction segmentation process. \nConsequently, the identification of optimal temporal prompt forms is essential for guiding clinicians in their utilization of SAM. 
\nFurthermore, determining the appropriate moment to terminate an interaction represents a delicate balance between efficiency and effectiveness.\nTo provide sequentially optimal prompt forms and the best stopping time, we introduce an \\textbf{A}daptive \\textbf{I}nteraction and \\textbf{E}arly \\textbf{S}topping mechanism, named \\textbf{AIES}.\nThis mechanism models the IMIS process as a Markov Decision Process (MDP) and employs a Deep Q-network (DQN) with an adaptive penalty mechanism to optimize interaction forms and ascertain the optimal cessation point when implementing SAM.\nUpon evaluation using three public datasets, AIES identified an efficient and effective prompt strategy that significantly reduced interaction costs while achieving better segmentation accuracy than the rule-based method.", "title":"Optimizing Efficiency and Effectiveness in Sequential Prompt Strategy for SAM using Reinforcement Learning", "authors":[ "Huang, Yifei", "Shen, Chuyun", "Li, Wenhao", "Wang, Xiangfeng", "Jin, Bo", "Cai, Haibin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":94 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0973_paper.pdf", "bibtext":"@InProceedings{ Xie_Multidisease_MICCAI2024,\n author = { Xie, Jianyang and Chen, Xiuju and Zhao, Yitian and Meng, Yanda and Zhao, He and Nguyen, Anh and Li, Xiaoxin and Zheng, Yalin },\n title = { { Multi-disease Detection in Retinal Images Guided by Disease Causal Estimation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"There have been significant advancements in analyzing retinal images for the diagnosis of eye diseases and other systemic conditions. However, a key challenge is multi-disease detection, particularly in addressing the demands of real-world applications where a patient may have more than one condition. To address this challenge, this study introduces a novel end-to-end approach to multi-disease detection using retinal images guided by disease causal estimation. This model leverages disease-specific features, integrating disease causal relationships and interactions between image features and disease conditions. Specifically, 1) the interactions between disease and image features are captured by cross-attention in a transformer decoder. 2) The causal relationships among diseases are automatically estimated as the directed acyclic graph (DAG) based on the dataset itself and are utilized to regularize disease-specific feature learning with disease causal interaction. 3) A novel retinal multi-disease dataset of 500 patients, including six lesion labels, was generated for evaluation purposes. Compared with other methods, the proposed approach not only achieves multi-disease diagnosis with high performance but also provides a method to estimate the causal relationships among diseases. We evaluated our method on two retinal datasets: a public color fundus photography dataset and an in-house fundus fluorescein angiography (FFA) dataset. The results show that the proposed method outperforms other state-of-the-art multi-label models.
Our FFA database and code have been released at https:\/\/github.com\/davelailai\/multi-disease-detection-guided-by-causal-estimation.git.", "title":"Multi-disease Detection in Retinal Images Guided by Disease Causal Estimation", "authors":[ "Xie, Jianyang", "Chen, Xiuju", "Zhao, Yitian", "Meng, Yanda", "Zhao, He", "Nguyen, Anh", "Li, Xiaoxin", "Zheng, Yalin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/davelailai\/multi-disease-detection-guided-by-causal-estimation.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":95 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1165_paper.pdf", "bibtext":"@InProceedings{ Ou_AGraphEmbedded_MICCAI2024,\n author = { Ou, Zaixin and Jiang, Caiwen and Liu, Yuxiao and Zhang, Yuanwang and Cui, Zhiming and Shen, Dinggang },\n title = { { A Graph-Embedded Latent Space Learning and Clustering Framework for Incomplete Multimodal Multiclass Alzheimer\u2019s Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Alzheimer\u2019s disease (AD) is an irreversible neurodegenerative disease, where early diagnosis is crucial for improving prognosis and delaying the progression of the disease. Leveraging multimodal PET images, which can reflect various biomarkers like A\u03b2 and tau protein, is a promising method for AD diagnosis. However, due to the high cost and practical issues of PET imaging, it often faces challenges with incomplete multimodal data. To address this dilemma, in this paper, we propose a Graph-embedded latent Space Learning and Clustering framework, named Graph-SLC, for multiclass AD diagnosis under incomplete multimodal data scenarios. The key concept is leveraging all available subjects, including those with incomplete modality data, to train a network for projecting subjects into their latent representations. These latent representations not only exploit the complementarity of different modalities but also showcase separability among different classes. Specifically, our Graph-SLC consists of three modules, i.e., a multimodal reconstruction module, a subject-similarity graph embedding module, and an AD-oriented latent clustering module. Among them, the multimodal reconstruction module generates subject-specific latent representations that can comprehensively incorporate information from different modalities with guidance from all available modalities. The subject-similarity graph embedding module then enhances the discriminability of different latent representations by ensuring the neighborhood relationships between subjects are preserved in subject-specific latent representations. The AD-oriented latent clustering module facilitates the separability of multiple classes by constraining subject-specific latent representations within the same class to be in the same cluster. Experiments on the ADNI show that our method achieves state-of-the-art performance in multiclass AD diagnosis. 
Our code is available at https:\/\/github.com\/Ouzaixin\/Graph-SLC.", "title":"A Graph-Embedded Latent Space Learning and Clustering Framework for Incomplete Multimodal Multiclass Alzheimer\u2019s Disease Diagnosis", "authors":[ "Ou, Zaixin", "Jiang, Caiwen", "Liu, Yuxiao", "Zhang, Yuanwang", "Cui, Zhiming", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Ouzaixin\/Graph-SLC" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":96 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3962_paper.pdf", "bibtext":"@InProceedings{ Sha_APatientSpecific_MICCAI2024,\n author = { Sharma, Susheela and Go, Sarah and Yakay, Zeynep and Kulkarni, Yash and Kapuria, Siddhartha and Amadio, Jordan P. and Rajebi, Reza and Khadem, Mohsen and Navab, Nassir and Alambeigi, Farshid },\n title = { { A Patient-Specific Framework for Autonomous Spinal Fixation via a Steerable Drilling Robot } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper, with the goal of enhancing the minimally invasive spinal fixation procedure in osteoporotic patients, we propose a first-of-its-kind image-guided robotic framework for performing an autonomous and patient-specific procedure using a unique concentric tube steerable drilling robot (CT-SDR). Particularly, leveraging the CT-SDR, we introduce the concept of J-shape drilling based on a pre-operative trajectory planned on the CT scan of a patient, followed by appropriate calibration, registration, and navigation steps to safely execute this trajectory in real-time using our unique robotic setup. To thoroughly evaluate the performance of our framework, we performed several experiments on two different vertebral phantoms designed based on CT scans of real patients.", "title":"A Patient-Specific Framework for Autonomous Spinal Fixation via a Steerable Drilling Robot", "authors":[ "Sharma, Susheela", "Go, Sarah", "Yakay, Zeynep", "Kulkarni, Yash", "Kapuria, Siddhartha", "Amadio, Jordan P.", "Rajebi, Reza", "Khadem, Mohsen", "Navab, Nassir", "Alambeigi, Farshid" ], "id":"Conference", "arxiv_id":"2405.17606", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":97 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0150_paper.pdf", "bibtext":"@InProceedings{ Wan_Toward_MICCAI2024,\n author = { Wang, Bomin and Luo, Xinzhe and Zhuang, Xiahai },\n title = { { Toward Universal Medical Image Registration via Sharpness-Aware Meta-Continual Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Current deep learning approaches in medical image registration usually face the challenges of distribution shift and data collection, hindering real-world deployment.
In contrast, universal medical image registration aims to perform registration on a wide range of clinically relevant tasks simultaneously, thus having tremendous potential for clinical applications. In this paper, we present the first attempt to achieve the goal of universal 3D medical image registration in sequential learning scenarios by proposing a continual learning method. Specifically, we utilize meta-learning with experience replay to mitigate the problem of catastrophic forgetting. To promote the generalizability of meta-continual learning, we further propose sharpness-aware meta-continual learning (SAMCL). We validate the effectiveness of our method on four datasets in a continual learning setup, including brain MR, abdomen CT, lung CT, and abdomen MR-CT image pairs. Results have shown the potential of SAMCL in realizing universal image registration, which performs better than or on par with vanilla sequential or centralized multi-task training strategies. The source code will be available from https:\/\/github.com\/xzluo97\/Continual-Reg.", "title":"Toward Universal Medical Image Registration via Sharpness-Aware Meta-Continual Learning", "authors":[ "Wang, Bomin", "Luo, Xinzhe", "Zhuang, Xiahai" ], "id":"Conference", "arxiv_id":"2406.17575", "GitHub":[ "https:\/\/github.com\/xzluo97\/Continual-Reg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":98 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1923_paper.pdf", "bibtext":"@InProceedings{ Cha_EMNet_MICCAI2024,\n author = { Chang, Ao and Zeng, Jiajun and Huang, Ruobing and Ni, Dong },\n title = { { EM-Net: Efficient Channel and Frequency Learning with Mamba for 3D Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Convolutional neural networks have primarily led 3D medical image segmentation but may be limited by small receptive fields.\nTransformer models excel in capturing global relationships through self-attention but are challenged by high computational costs at high resolutions. Recently, Mamba, a state space model, has emerged as an effective approach for sequential modeling. Inspired by its success, we introduce a novel Mamba-based 3D medical image segmentation model called EM-Net. It not only efficiently captures attentive interaction between regions by integrating and selecting channels, but also effectively utilizes the frequency domain to harmonize the learning of features across varying scales, while accelerating training speed. Comprehensive experiments on two challenging multi-organ datasets with other state-of-the-art (SOTA) algorithms show that our method exhibits better segmentation accuracy while requiring nearly half the parameter size of SOTA models and 2x faster training speed.
Our code is publicly available at https:\/\/github.com\/zang0902\/EM-Net.", "title":"EM-Net: Efficient Channel and Frequency Learning with Mamba for 3D Medical Image Segmentation", "authors":[ "Chang, Ao", "Zeng, Jiajun", "Huang, Ruobing", "Ni, Dong" ], "id":"Conference", "arxiv_id":"2409.17675", "GitHub":[ "https:\/\/github.com\/zang0902\/EM-Net" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":99 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2205_paper.pdf", "bibtext":"@InProceedings{ Che_SpatialDivision_MICCAI2024,\n author = { Chen, Jixiang and Lin, Yiqun and Sun, Haoran and Li, Xiaomeng },\n title = { { Spatial-Division Augmented Occupancy Field for Bone Shape Reconstruction from Biplanar X-Rays } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Retrieving 3D bone anatomy from biplanar X-ray images is crucial since it can significantly reduce radiation exposure compared to traditional CT-based methods. Although various deep learning models have been proposed to address this complex task, they suffer from two limitations: 1) They employ voxel representation for bone shape and exploit 3D convolutional layers to capture anatomy prior, which are memory-intensive and limit the reconstruction resolution. 2) They overlook the prevalent occlusion effect within X-ray images and directly extract features using a simple loss, which struggles to fully exploit complex X-ray information. To tackle these concerns, we present Spatial-division Augmented Occupancy Field~(SdAOF). SdAOF adopts the continuous occupancy field for shape representation, reformulating the reconstruction problem as a per-point occupancy value prediction task. Its implicit and continuous nature enables memory-efficient training and fine-scale surface reconstruction at different resolutions during the inference. Moreover, we propose a novel spatial-division augmented distillation strategy to provide feature-level guidance for capturing the occlusion relationship. Extensive experiments on the pelvis reconstruction dataset show that SdAOF outperforms state-of-the-art methods and reconstructs fine-scale bone surfaces. 
Our code will be made available.", "title":"Spatial-Division Augmented Occupancy Field for Bone Shape Reconstruction from Biplanar X-Rays", "authors":[ "Chen, Jixiang", "Lin, Yiqun", "Sun, Haoran", "Li, Xiaomeng" ], "id":"Conference", "arxiv_id":"2407.15433", "GitHub":[ "https:\/\/github.com\/xmed-lab\/SdAOF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":100 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2795_paper.pdf", "bibtext":"@InProceedings{ Liu_Causal_MICCAI2024,\n author = { Liu, Hengxin and Li, Qiang and Nie, Weizhi and Xu, Zibo and Liu, Anan },\n title = { { Causal Intervention for Brain tumor Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Due to blurred boundaries between the background and the foreground, along with the overlapping of different tumor lesions, accurate segmentation of brain tumors presents significant challenges. To tackle these issues, we propose a causal intervention model designed for brain tumor segmentation. This model effectively eliminates the influence of irrelevant content on tumor region feature extraction, thereby enhancing segmentation precision. Notably, we adopt a front-door adjustment strategy to mitigate the confounding effects of MRI images on our segmentation outcomes. Our approach specifically targets the removal of background effects and interference in overlapping areas across tumor categories. Comprehensive experiments on the BraTS2020 and BraTS2021 datasets confirm the superior performance of our proposed method, demonstrating its effectiveness in improving accuracy in challenging segmentation scenarios.", "title":"Causal Intervention for Brain tumor Segmentation", "authors":[ "Liu, Hengxin", "Li, Qiang", "Nie, Weizhi", "Xu, Zibo", "Liu, Anan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":101 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2136_paper.pdf", "bibtext":"@InProceedings{ Zhe_Deep_MICCAI2024,\n author = { Zheng, Yuanhang and Qiu, Yiqiao and Che, Haoxuan and Chen, Hao and Zheng, Wei-Shi and Wang, Ruixuan },\n title = { { Deep Model Reference: Simple yet Effective Confidence Estimation for Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Effective confidence estimation is desired for image classification tasks like clinical diagnosis based on medical imaging. However, it is well known that modern neural networks often show over-confidence in their predictions. Deep Ensemble (DE) is one of the state-of-the-art methods to estimate reliable confidence.
In this work, we observed that DE sometimes harms the confidence estimation due to relatively lower confidence output for correctly classified samples. Motivated by the observation that a doctor often refers to other doctors\u2019 opinions to adjust the confidence for his or her own decision, we propose a simple but effective post-hoc confidence estimation method called Deep Model Reference (DMR). Specifically, DMR employs one individual model to make the decision while using a group of individual models to help estimate the confidence for that decision. Rigorous proof and extensive empirical evaluations show that DMR achieves superior performance in confidence estimation compared to DE and other state-of-the-art methods, making trustworthy image classification more practical. Source code is available at https:\/\/openi.pcl.ac.cn\/OpenMedIA\/MICCAI2024_DMR.", "title":"Deep Model Reference: Simple yet Effective Confidence Estimation for Image Classification", "authors":[ "Zheng, Yuanhang", "Qiu, Yiqiao", "Che, Haoxuan", "Chen, Hao", "Zheng, Wei-Shi", "Wang, Ruixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":102 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2191_paper.pdf", "bibtext":"@InProceedings{ Che_TinyUNet_MICCAI2024,\n author = { Chen, Junren and Chen, Rui and Wang, Wei and Cheng, Junlong and Zhang, Lei and Chen, Liangyin },\n title = { { TinyU-Net: Lighter yet Better U-Net with Cascaded Multi-Receptive Fields } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Lightweight models for automatic medical image segmentation have the potential to advance health equity, particularly in limited-resource settings. Nevertheless, their reduced parameters and computational complexity compared to state-of-the-art methods often result in inadequate feature representation, leading to suboptimal segmentation performance. To this end, we propose a Cascade Multi-Receptive Fields (CMRF) module and develop a lighter yet better U-Net based on CMRF, named TinyU-Net, comprising only 0.48M parameters. Specifically, the CMRF module leverages redundant information across multiple channels in the feature map to explore diverse receptive fields by a cost-friendly cascading strategy, improving feature representation while maintaining the lightweight nature of the model, thus enhancing performance. Testing CMRF-based TinyU-Net on cost-effective medical image segmentation datasets demonstrates superior performance with significantly fewer parameters and computational complexity compared to state-of-the-art methods. For instance, in lesion segmentation on the ISIC2018 dataset, TinyU-Net has 52x, 3x, and 194x fewer parameters than baseline U-Net, lightweight UNeXt, and high-performance TransUNet, respectively, while achieving IoU scores that are +3.90%, +3.65%, and +1.05% higher. Notably, the CMRF module exhibits adaptability, easily integrating into other networks.
Experimental results suggest that TinyU-Net, with its outstanding performance, holds the potential to be implemented in limited-resource settings, thereby contributing to health equity. The code is available at https:\/\/github.com\/ChenJunren-Lab\/TinyU-Net.", "title":"TinyU-Net: Lighter yet Better U-Net with Cascaded Multi-Receptive Fields", "authors":[ "Chen, Junren", "Chen, Rui", "Wang, Wei", "Cheng, Junlong", "Zhang, Lei", "Chen, Liangyin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ChenJunren-Lab\/TinyU-Net" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":103 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0757_paper.pdf", "bibtext":"@InProceedings{ Wu_TeleOR_MICCAI2024,\n author = { Wu, Yixuan and Hu, Kaiyuan and Shao, Qian and Chen, Jintai and Chen, Danny Z. and Wu, Jian },\n title = { { TeleOR: Real-time Telemedicine System for Full-Scene Operating Room } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"The advent of telemedicine represents a transformative development in leveraging technology to extend the reach of specialized medical expertise to remote surgeries, a field where the immediacy of expert guidance is paramount. However, the intricate dynamics of Operating Room (OR) scene pose unique challenges for telemedicine, particularly in achieving high-fidelity, real-time scene reconstruction and transmission amidst obstructions and bandwidth limitations. This paper introduces TeleOR, a pioneering system designed to address these challenges through real-time OR scene reconstruction for Tele-intervention. TeleOR distinguishes itself with three innovative approaches: dynamic self-calibration, which leverages inherent scene features for calibration without the need for preset markers, allowing for obstacle avoidance and real-time camera adjustment; selective OR reconstruction, focusing on dynamically changing scene segments to reduce reconstruction complexity; and viewport-adaptive transmission, optimizing data transmission based on real-time client feedback to efficiently deliver high-quality 3D reconstructions within bandwidth constraints. 
Comprehensive experiments on the 4D-OR surgical scene dataset demonstrate the superiority and applicability of TeleOR, illuminating the potential to revolutionize tele-interventions by overcoming the spatial and technical barriers inherent in remote surgical guidance.", "title":"TeleOR: Real-time Telemedicine System for Full-Scene Operating Room", "authors":[ "Wu, Yixuan", "Hu, Kaiyuan", "Shao, Qian", "Chen, Jintai", "Chen, Danny Z.", "Wu, Jian" ], "id":"Conference", "arxiv_id":"2407.19763", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":104 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3047_paper.pdf", "bibtext":"@InProceedings{ Zho_SBCAL_MICCAI2024,\n author = { Zhou, Taimin and Yang, Jin and Cui, Lingguo and Zhang, Nan and Chai, Senchun },\n title = { { SBC-AL: Structure and Boundary Consistency-based Active Learning for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning-based (DL) models have shown superior representation capabilities in medical image segmentation tasks. However, these representation powers require DL models to be trained on extensive annotated data, but the high annotation costs hinder this, thus limiting their performance. Active learning (AL) is a feasible solution for efficiently training models to demonstrate representation powers under low annotation budgets. It is achieved by querying unlabeled data for new annotations to continuously train models. Thus, the performance of AL methods largely depends on the query strategy. However, designing an efficient query strategy remains challenging due to limited information from unlabeled data for querying. Another challenge is that few methods exploit information in segmentation results for querying. To address them, first, we propose a Structure-aware Feature Prediction (SFP) and Attentional Segmentation Refinement (ASR) module to enable models to generate segmentation results with sufficient information for querying. The incorporation of these modules enhances the ability of the models to capture information related to anatomical structures and boundaries. Additionally, we propose an uncertainty-based querying strategy to leverage information in segmentation results. Specifically, uncertainty is evaluated by assessing the consistency of anatomical structure and boundary information within segmentation results by calculating a Structure Consistency Score (SCS) and a Boundary Consistency Score (BCS). Subsequently, data is queried for annotations based on uncertainty.
The incorporation of SFP and ASR-enhanced segmentation models and this uncertainty-based querying strategy into a standard AL strategy leads to a novel method, termed Structure and Boundary Consistency-based Active Learning (SBC-AL).", "title":"SBC-AL: Structure and Boundary Consistency-based Active Learning for Medical Image Segmentation", "authors":[ "Zhou, Taimin", "Yang, Jin", "Cui, Lingguo", "Zhang, Nan", "Chai, Senchun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":105 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0783_paper.pdf", "bibtext":"@InProceedings{ Zha_WIALD2ND_MICCAI2024,\n author = { Zhao, Haoyu and Gu, Yuliang and Zhao, Zhou and Du, Bo and Xu, Yongchao and Yu, Rui },\n title = { { WIA-LD2ND: Wavelet-based Image Alignment for Self-supervised Low-Dose CT Denoising } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"In clinical examinations and diagnoses, low-dose computed tomography (LDCT) is crucial for minimizing health risks compared with normal-dose computed tomography (NDCT). However, reducing the radiation dose compromises the signal-to-noise ratio, leading to degraded quality of CT images. To address this, we analyze LDCT denoising task based on experimental results from the frequency perspective, and then introduce a novel self-supervised CT image denoising method called WIA-LD2ND, only using NDCT data. The proposed WIA-LD2ND comprises two modules: Wavelet-based Image Alignment (WIA) and Frequency-Aware Multi-scale Loss (FAM). First, WIA is introduced to align NDCT with LDCT by mainly adding noise to the high-frequency components, which is the main difference between LDCT and NDCT. Second, to better capture high-frequency components and detailed information, Frequency-Aware Multi-scale Loss (FAM) is proposed by effectively utilizing multi-scale feature space. 
Extensive experiments on two public LDCT denoising datasets demonstrate that our WIA-LD2ND, which only uses NDCT, outperforms several existing state-of-the-art weakly-supervised and self-supervised methods.", "title":"WIA-LD2ND: Wavelet-based Image Alignment for Self-supervised Low-Dose CT Denoising", "authors":[ "Zhao, Haoyu", "Gu, Yuliang", "Zhao, Zhou", "Du, Bo", "Xu, Yongchao", "Yu, Rui" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zhaohaoyu376\/WI-LD2ND" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":106 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1764_paper.pdf", "bibtext":"@InProceedings{ Zha_MemWarp_MICCAI2024,\n author = { Zhang, Hang and Chen, Xiang and Hu, Renjiu and Liu, Dongdong and Li, Gaolei and Wang, Rongguang },\n title = { { MemWarp: Discontinuity-Preserving Cardiac Registration with Memorized Anatomical Filters } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Many existing learning-based deformable image registration methods impose constraints on deformation fields to ensure they are globally smooth and continuous. \nHowever, this assumption does not hold in cardiac image registration, where different anatomical regions exhibit asymmetric motions during respiration and movements due to sliding organs within the chest.\nConsequently, such global constraints fail to accommodate local discontinuities across organ boundaries, potentially resulting in erroneous and unrealistic displacement fields.\nIn this paper, we address this issue with \\textit{MemWarp}, a learning framework that leverages a memory network to store prototypical information tailored to different anatomical regions.
\n\\textit{MemWarp} is different from earlier approaches in two main aspects: firstly, by decoupling feature extraction from similarity matching in moving and fixed images, it facilitates more effective utilization of feature maps; secondly, despite its capability to preserve discontinuities, it eliminates the need for segmentation masks during model inference.\nIn experiments on a publicly available cardiac dataset, our method achieves considerable improvements in registration accuracy and produces realistic deformations, outperforming state-of-the-art methods with a remarkable 7.1\\% Dice score improvement over the runner-up semi-supervised method.\nSource code will be available at \\url{https:\/\/github.com\/tinymilky\/Mem-Warp}.", "title":"MemWarp: Discontinuity-Preserving Cardiac Registration with Memorized Anatomical Filters", "authors":[ "Zhang, Hang", "Chen, Xiang", "Hu, Renjiu", "Liu, Dongdong", "Li, Gaolei", "Wang, Rongguang" ], "id":"Conference", "arxiv_id":"2407.08093", "GitHub":[ "https:\/\/github.com\/tinymilky\/Mem-Warp" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":107 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1911_paper.pdf", "bibtext":"@InProceedings{ Liu_Controllable_MICCAI2024,\n author = { Liu, Shiyu and Wang, Fan and Ren, Zehua and Lian, Chunfeng and Ma, Jianhua },\n title = { { Controllable Counterfactual Generation for Interpretable Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Counterfactual generation is used to solve the problem of lack of interpretability and insufficient data in deep diagnostic models. By synthesizing counterfactual images based on an image-to-image generation model trained with unpaired data, we can interpret the output of a classification model according to a hypothetical class and enhance the training dataset. Recent counterfactual generation approaches based on autoencoders or generative adversarial models are difficult to train or produce realistic images due to the trade-off between image similarity and class difference. In this paper, we propose a new counterfactual generation method based on diffusion models. Our method combines the class-condition control from classifier-free guidance and the reference-image control with attention injection to transform the input images with unknown labels into a hypothesis class. Our method can flexibly adjust the generation trade-off in the inference stage instead of the training stage, providing controllable visual explanations consistent with medical knowledge for clinicians. We demonstrate the effectiveness of our method on the ADNI structural MRI dataset for Alzheimer\u2019s disease diagnosis and conditional 3D image2image generation tasks.
Our codes can be found at https:\/\/github.com\/ladderlab-xjtu\/ControlCG.", "title":"Controllable Counterfactual Generation for Interpretable Medical Image Classification", "authors":[ "Liu, Shiyu", "Wang, Fan", "Ren, Zehua", "Lian, Chunfeng", "Ma, Jianhua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ladderlab-xjtu\/ControlCG" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":108 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2031_paper.pdf", "bibtext":"@InProceedings{ Che_RoCoSDF_MICCAI2024,\n author = { Chen, Hongbo and Gao, Yuchong and Zhang, Shuhang and Wu, Jiangjie and Ma, Yuexin and Zheng, Rui },\n title = { { RoCoSDF: Row-Column Scanned Neural Signed Distance Fields for Freehand 3D Ultrasound Imaging Shape Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"The reconstruction of high-quality shape geometry is crucial for developing freehand 3D ultrasound imaging. \nHowever, the shape reconstruction of multi-view ultrasound data remains challenging due to the elevation distortion caused by thick transducer probes.\nIn this paper, we present a novel learning-based framework RoCoSDF, which can effectively generate an implicit surface through continuous shape representations derived from row-column scanned datasets. \nIn RoCoSDF, we encode the datasets from different views into the corresponding neural signed distance function (SDF) and then operate all SDFs in a normalized 3D space to restore the actual surface contour.\nWithout requiring pre-training on large-scale ground truth shapes, our approach can synthesize a smooth and continuous signed distance field from multi-view SDFs to implicitly represent the actual geometry.\nFurthermore, two regularizers are introduced to facilitate shape refinement by constraining the SDF near the surface.\nThe experiments on twelve shape datasets acquired by two ultrasound transducer probes validate that RoCoSDF can effectively reconstruct accurate geometric shapes from multi-view ultrasound data, which outperforms current reconstruction methods. Code is\navailable at https:\/\/github.com\/chenhbo\/RoCoSDF.", "title":"RoCoSDF: Row-Column Scanned Neural Signed Distance Fields for Freehand 3D Ultrasound Imaging Shape Reconstruction", "authors":[ "Chen, Hongbo", "Gao, Yuchong", "Zhang, Shuhang", "Wu, Jiangjie", "Ma, Yuexin", "Zheng, Rui" ], "id":"Conference", "arxiv_id":"2408.07325", "GitHub":[ "https:\/\/github.com\/chenhbo\/RoCoSDF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":109 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0215_paper.pdf", "bibtext":"@InProceedings{ Yin_HistoSyn_MICCAI2024,\n author = { Yin, Chong and Liu, Siqi and Wong, Vincent Wai-Sun and Yuen, Pong C. 
},\n title = { { HistoSyn: Histomorphology-Focused Pathology Image Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Examining pathology images through visual microscopy is widely considered the most reliable method for diagnosing different medical conditions. Although deep learning-based methods show great potential for aiding pathology image analysis, they are hindered by the lack of accessible large-scale annotated data. Large text-to-image models have significantly advanced the synthesis of diverse contexts within natural image analysis, thereby expanding existing datasets. However, the variety of histomorphological features in pathology images, which differ from that of natural images, has been less explored. In this paper, we propose a histomorphology-focused pathology image synthesis (HistoSyn) method. Specifically, HistoSyn constructs instructive textural prompts from spatial and morphological attributes of pathology images. It involves analyzing the intricate patterns and structures found within pathological images and translating these visual details into descriptive prompts. Furthermore, HistoSyn presents new criteria for image quality evaluation focusing on spatial and morphological characteristics. Experiments have demonstrated that our method can achieve a diverse range of high-quality pathology images, with a focus on histomorphological attributes.", "title":"HistoSyn: Histomorphology-Focused Pathology Image Synthesis", "authors":[ "Yin, Chong", "Liu, Siqi", "Wong, Vincent Wai-Sun", "Yuen, Pong C." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/7LFB\/HistoSyn" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":110 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3261_paper.pdf", "bibtext":"@InProceedings{ Kim_Enhancing_MICCAI2024,\n author = { Kim, Yunsoo and Wu, Jinge and Abdulle, Yusuf and Gao, Yue and Wu, Honghan },\n title = { { Enhancing Human-Computer Interaction in Chest X-ray Analysis using Vision and Language Model with Eye Gaze Patterns } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advancements in Computer Assisted Diagnosis have shown promising performance in medical imaging tasks, particularly in chest X-ray analysis. However, the interaction between these models and radiologists has been primarily limited to input images. This work proposes a novel approach to enhance human-computer interaction in chest X-ray analysis using Vision-Language Models (VLMs) enhanced with radiologists\u2019 attention by incorporating eye gaze data alongside textual prompts. Our approach leverages heatmaps generated from eye gaze data, overlaying them onto medical images to highlight areas of intense radiologist\u2019s focus during chest X-ray evaluation. We evaluate this methodology in tasks such as visual question answering, chest X-ray report automation, error detection, and differential diagnosis. 
Our results demonstrate the inclusion of eye gaze information significantly enhances the accuracy of chest X-ray analysis. Also, the impact of eye gaze on fine-tuning was confirmed as it outperformed other medical VLMs in all tasks except visual question answering. This work marks the potential of leveraging both the VLM\u2019s capabilities and the radiologist\u2019s domain knowledge to improve the capabilities of AI models in medical imaging, paving a novel way for Computer Assisted Diagnosis with a human-centred AI.", "title":"Enhancing Human-Computer Interaction in Chest X-ray Analysis using Vision and Language Model with Eye Gaze Patterns", "authors":[ "Kim, Yunsoo", "Wu, Jinge", "Abdulle, Yusuf", "Gao, Yue", "Wu, Honghan" ], "id":"Conference", "arxiv_id":"2404.02370", "GitHub":[ "https:\/\/github.com\/knowlab\/CXR_VLM_EyeGaze" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":111 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0139_paper.pdf", "bibtext":"@InProceedings{ Lyu_SuperpixelGuided_MICCAI2024,\n author = { Lyu, Fei and Xu, Jingwen and Zhu, Ye and Wong, Grace Lai-Hung and Yuen, Pong C. },\n title = { { Superpixel-Guided Segment Anything Model for Liver Tumor Segmentation with Couinaud Segment Prompt } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"The Segment Anything Model (SAM) is a powerful foundation model which has shown impressive performance for generic image segmentation. However, directly applying SAM to liver tumor segmentation presents challenges due to the domain gap between nature images and medical images, and the requirement of labor-intensive manual prompt generation. To address these challenges, we first investigate text promptable liver tumor segmentation by Couinaud segment, where Couinaud segment prompt can be automatically extracted from radiology reports to reduce massive manual efforts. Moreover, we propose a novel CouinaudSAM to adapt SAM for liver tumor segmentation. Specifically, we achieve this by: 1) a superpixel-guided prompt generation approach to effectively transform Couinaud segment prompt into SAM-acceptable point prompt; and 2) a difficulty-aware prompt sampling strategy to make model training more effective and efficient. Experimental results on the public liver tumor segmentation dataset demonstrate that our method outperforms the other state-of-the-art methods.", "title":"Superpixel-Guided Segment Anything Model for Liver Tumor Segmentation with Couinaud Segment Prompt", "authors":[ "Lyu, Fei", "Xu, Jingwen", "Zhu, Ye", "Wong, Grace Lai-Hung", "Yuen, Pong C." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":112 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1356_paper.pdf", "bibtext":"@InProceedings{ Gu_Revisiting_MICCAI2024,\n author = { Gu, Yi and Lin, Yi and Cheng, Kwang-Ting and Chen, Hao },\n title = { { Revisiting Deep Ensemble Uncertainty for Enhanced Medical Anomaly Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical anomaly detection (AD) is crucial in pathological identification and localization. Current methods typically rely on uncertainty estimation in deep ensembles to detect anomalies, assuming that ensemble learners should agree on normal samples while exhibiting disagreement on unseen anomalies in the output space. However, these methods may suffer from inadequate disagreement on anomalies or diminished agreement on normal samples. To tackle these issues, we propose D2UE, a Diversified Dual-space Uncertainty Estimation framework for medical anomaly detection. To effectively balance agreement and disagreement for anomaly detection, we propose Redundancy-Aware Repulsion (RAR), which uses a similarity kernel that remains invariant to both isotropic scaling and orthogonal transformations, explicitly promoting diversity in learners\u2019 feature space. Moreover, to accentuate anomalous regions, we develop Dual-Space Uncertainty (DSU), which utilizes the ensemble\u2019s uncertainty in input and output spaces. In input space, we first calculate gradients of reconstruction error with respect to input images. The gradients are then integrated with reconstruction outputs to estimate uncertainty for inputs, enabling effective anomaly discrimination even when output space disagreement is minimal. We conduct a comprehensive evaluation of five medical benchmarks with different backbones. 
Experimental results demonstrate the superiority of our method to state-of-the-art methods and the effectiveness of each component in our framework.", "title":"Revisiting Deep Ensemble Uncertainty for Enhanced Medical Anomaly Detection", "authors":[ "Gu, Yi", "Lin, Yi", "Cheng, Kwang-Ting", "Chen, Hao" ], "id":"Conference", "arxiv_id":"2409.17485", "GitHub":[ "https:\/\/github.com\/Rubiscol\/D2UE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":113 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2261_paper.pdf", "bibtext":"@InProceedings{ Dan_SiNGR_MICCAI2024,\n author = { Dang, Trung and Nguyen, Huy Hoang and Tiulpin, Aleksei },\n title = { { SiNGR: Brain Tumor Segmentation via Signed Normalized Geodesic Transform Regression } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"One of the primary challenges in brain tumor segmentation arises from the uncertainty of voxels close to tumor boundaries. However, the conventional process of generating ground truth segmentation masks fails to treat such uncertainties properly. Those ``hard labels\u2019\u2019 with 0s and 1s conceptually influenced the majority of prior studies on brain image segmentation. As a result, tumor segmentation is often solved through voxel classification. In this work, we instead view this problem as a voxel-level regression, where the ground truth represents a certainty mapping from any pixel to the border of the tumor. We propose a novel ground truth label transformation, which is based on a signed geodesic transform, to capture the uncertainty in brain tumors\u2019 vicinity. We combine this idea with a Focal-like regression L1-loss that enables effective regression learning in high-dimensional output space by appropriately weighting voxels according to their difficulty. We thoroughly conduct an experimental evaluation to validate the components of our proposed method, compare it to a diverse array of state-of-the-art segmentation models, and show that it is architecture-agnostic. 
The code of our method is made publicly available (\\url{https:\/\/github.com\/Oulu-IMEDS\/SiNGR\/}).", "title":"SiNGR: Brain Tumor Segmentation via Signed Normalized Geodesic Transform Regression", "authors":[ "Dang, Trung", "Nguyen, Huy Hoang", "Tiulpin, Aleksei" ], "id":"Conference", "arxiv_id":"2405.16813", "GitHub":[ "https:\/\/github.com\/Oulu-IMEDS\/SiNGR" ], "paper_page":"https:\/\/huggingface.co\/papers\/2405.16813", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":3, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":114 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2040_paper.pdf", "bibtext":"@InProceedings{ Agg_Acrosssubject_MICCAI2024,\n author = { Aggarwal, Himanshu and Al-Shikhley, Liza and Thirion, Bertrand },\n title = { { Across-subject ensemble-learning alleviates the need for large samples for fMRI decoding } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Decoding cognitive states from functional magnetic resonance imaging is central to understanding the functional organization of the brain. Within-subject decoding avoids between-subject correspondence problems but requires large sample sizes to make accurate predictions; obtaining such large sample sizes is both challenging and expensive. Here, we investigate an ensemble approach to decoding that combines the classifiers trained on data from other subjects to decode cognitive states in a new subject. We compare it with the conventional decoding approach on five different datasets and cognitive tasks. We find that it outperforms the conventional approach by up to 20% in accuracy, especially for datasets with limited per-subject data. The ensemble approach is particularly advantageous when the classifier is trained in voxel space. Furthermore, a Multi-layer Perceptron turns out to be a good default choice as an ensemble method. 
These results show that the pre-training strategy reduces the need for large per-subject data.", "title":"Across-subject ensemble-learning alleviates the need for large samples for fMRI decoding", "authors":[ "Aggarwal, Himanshu", "Al-Shikhley, Liza", "Thirion, Bertrand" ], "id":"Conference", "arxiv_id":"2407.12056", "GitHub":[ "https:\/\/github.com\/man-shu\/ensemble-fmri" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":115 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1314_paper.pdf", "bibtext":"@InProceedings{ Zul_CardioSpectrum_MICCAI2024,\n author = { Zuler, Shahar and Tejman-Yarden, Shai and Raviv, Dan },\n title = { { CardioSpectrum: Comprehensive Myocardium Motion Analysis with 3D Deep Learning and Geometric Insights } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"The ability to map left ventricle (LV) myocardial motion using computed tomography angiography (CTA) is essential to diagnosing cardiovascular conditions and guiding interventional procedures. Due to their inherent locality, conventional neural networks typically have difficulty predicting subtle tangential movements, which considerably lessens the level of precision at which myocardium three-dimensional (3D) mapping can be performed. Using 3D optical flow techniques and Functional Maps (FMs), we present a comprehensive approach to address this problem. FMs are known for their capacity to capture global geometric features, thus providing a fuller understanding of 3D geometry. As an alternative to traditional segmentation-based priors, we employ surface-based two-dimensional (2D) constraints derived from spectral correspondence methods. Our 3D deep learning architecture, based on the ARFlow model, is optimized to handle complex 3D motion analysis tasks. By incorporating FMs, we can capture the subtle tangential movements of the myocardium surface precisely, hence significantly improving the accuracy of 3D mapping of the myocardium. The experimental results confirm the effectiveness of this method in enhancing myocardium motion analysis. This approach can contribute to improving cardiovascular diagnosis and treatment.\nOur code and additional resources are available at: https:\/\/shaharzuler.github.io\/CardioSpectrumPage", "title":"CardioSpectrum: Comprehensive Myocardium Motion Analysis with 3D Deep Learning and Geometric Insights", "authors":[ "Zuler, Shahar", "Tejman-Yarden, Shai", "Raviv, Dan" ], "id":"Conference", "arxiv_id":"2407.03794", "GitHub":[ "https:\/\/github.com\/shaharzuler\/CardioSpectrum" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":116 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1064_paper.pdf", "bibtext":"@InProceedings{ Jin_Location_MICCAI2024,\n author = { Jin, Qiangguo and Huang, Jiapeng and Sun, Changming and Cui, Hui and Xuan, Ping and Su, Ran and Wei, Leyi and Wu, Yu-Jie and Wu, Chia-An and Duh, Henry B. L. 
and Lu, Yueh-Hsun },\n title = { { Location embedding based pairwise distance learning for fine-grained diagnosis of urinary stones } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"The precise diagnosis of urinary stones is crucial for devising effective treatment strategies. The diagnostic process, however, is often complicated by the low contrast between stones and surrounding tissues, as well as the variability in stone locations across different patients. To address this issue, we propose a novel location embedding based pairwise distance learning network (LEPD-Net) that leverages low-dose abdominal X-ray imaging combined with location information for the fine-grained diagnosis of urinary stones. LEPD-Net enhances the representation of stone-related features through context-aware region enhancement, incorporates critical location knowledge via stone location embedding, and achieves recognition of fine-grained objects with our innovative fine-grained pairwise distance learning. Additionally, we have established an in-house dataset on urinary tract stones to demonstrate the effectiveness of our proposed approach. Comprehensive experiments conducted on this dataset reveal that our framework significantly surpasses existing state-of-the-art methods.", "title":"Location embedding based pairwise distance learning for fine-grained diagnosis of urinary stones", "authors":[ "Jin, Qiangguo", "Huang, Jiapeng", "Sun, Changming", "Cui, Hui", "Xuan, Ping", "Su, Ran", "Wei, Leyi", "Wu, Yu-Jie", "Wu, Chia-An", "Duh, Henry B. L.", "Lu, Yueh-Hsun" ], "id":"Conference", "arxiv_id":"2407.00431", "GitHub":[ "https:\/\/github.com\/BioMedIA-repo\/LEPD-Net.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":117 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0730_paper.pdf", "bibtext":"@InProceedings{ Bao_Realworld_MICCAI2024,\n author = { Bao, Mingkun and Wang, Yan and Wei, Xinlong and Jia, Bosen and Fan, Xiaolin and Lu, Dong and Gu, Yifan and Cheng, Jian and Zhang, Yingying and Wang, Chuanyu and Zhu, Haogang },\n title = { { Real-world Visual Navigation for Cardiac Ultrasound View Planning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Echocardiography (ECHO) is commonly used to assist in the diagnosis of cardiovascular diseases (CVDs). However, manually conducting standardized ECHO view acquisitions by manipulating the probe demands significant experience and training for sonographers. In this work, we propose a visual navigation system for cardiac ultrasound view planning, designed to assist novice sonographers in accurately obtaining the required views for CVDs diagnosis. 
The system introduces a view-agnostic feature extractor to explore the spatial relationships between source frame views, learning the relative rotations among different frames for network regression, thereby facilitating transfer learning to improve the accuracy and robustness of identifying specific target planes. Additionally, we present a target consistency loss to ensure that frames within the same scan regress to the same target plane. The experimental results demonstrate that the average error in the apical four-chamber view (A4C) can be reduced to 7.055 degrees. Moreover, results from practical clinical validation indicate that, with the guidance of the visual navigation system, the average time for acquiring A4C view can be reduced by at least 3.86 times, which is instructive for the clinical practice of novice sonographers.", "title":"Real-world Visual Navigation for Cardiac Ultrasound View Planning", "authors":[ "Bao, Mingkun", "Wang, Yan", "Wei, Xinlong", "Jia, Bosen", "Fan, Xiaolin", "Lu, Dong", "Gu, Yifan", "Cheng, Jian", "Zhang, Yingying", "Wang, Chuanyu", "Zhu, Haogang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":118 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2265_paper.pdf", "bibtext":"@InProceedings{ Zha_ModelMix_MICCAI2024,\n author = { Zhang, Ke and Patel, Vishal M. },\n title = { { ModelMix: A New Model-Mixup Strategy to Minimize Vicinal Risk across Tasks for Few-scribble based Cardiac Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Pixel-level dense labeling is both resource-intensive and time-consuming, whereas weak labels such as scribble present a more feasible alternative to full annotations. However, training segmentation networks with weak supervision from scribbles remains challenging. Inspired by the fact that different segmentation tasks can be correlated with each other, we introduce a new approach to few-scribble supervised segmentation based on model parameter interpolation, termed as ModelMix. Leveraging the prior knowledge that linearly interpolating convolution kernels and bias terms should result in linear interpolations of the corresponding feature vectors, ModelMix constructs virtual models using convex combinations of convolutional parameters from separate encoders. We then regularize the model set to minimize vicinal risk across tasks in both unsupervised and scribble-supervised way. Validated on three open datasets, i.e., ACDC, MSCMRseg, and MyoPS, our few-scribble guided ModelMix significantly surpasses the performance of state-of-the-art scribble supervised methods. Our code is available at https:\/\/github.com\/BWGZK\/ModelMix.", "title":"ModelMix: A New Model-Mixup Strategy to Minimize Vicinal Risk across Tasks for Few-scribble based Cardiac Segmentation", "authors":[ "Zhang, Ke", "Patel, Vishal M." 
], "id":"Conference", "arxiv_id":"2406.13237", "GitHub":[ "https:\/\/github.com\/BWGZK\/ModelMix\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":119 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1749_paper.pdf", "bibtext":"@InProceedings{ Yan_Adversarial_MICCAI2024,\n author = { Yang, Yiguang and Ning, Guochen and Zhong, Changhao and Liao, Hongen },\n title = { { Adversarial Diffusion Model for Domain-Adaptive Depth Estimation in Bronchoscopic Navigation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"In bronchoscopic navigation, depth estimation has emerged as a promising method with higher robustness for localizing camera and obtaining scene geometry. While many supervised approaches have shown success for natural images, the scarcity of depth annotations limits their deployment in bronchoscopic scenarios. To address the issue of lacking depth labels, a common approach for unsupervised domain adaptation (UDA) includes one-shot mapping through generative adversarial networks. However, conventional adversarial models that directly recover the image distribution can suffer from reduced sample fidelity and learning biases. In this study, we propose a novel adversarial diffusion model for domain-adaptive depth estimation on bronchoscopic images. Our two-stage approach sequentially trains a supervised network on labeled virtual images, and an unsupervised adversarial network that aligns domain-invariant representations for cross-domain adaptation. This model reformulates depth estimation at each stage as an iterative diffusion-denoising process within the latent space for mitigating mapping biases and enhancing model performance. The experiments on clinical sequences show the superiority of our method on depth estimation as well as geometry reconstruction for bronchoscopic navigation.", "title":"Adversarial Diffusion Model for Domain-Adaptive Depth Estimation in Bronchoscopic Navigation", "authors":[ "Yang, Yiguang", "Ning, Guochen", "Zhong, Changhao", "Liao, Hongen" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":120 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0765_paper.pdf", "bibtext":"@InProceedings{ Hou_EnergyBased_MICCAI2024,\n author = { Hou, Zeyi and Yan, Ruixin and Yan, Ziye and Lang, Ning and Zhou, Xiuzhuang },\n title = { { Energy-Based Controllable Radiology Report Generation with Medical Knowledge } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated generation of radiology reports from chest X-rays has the potential to substantially reduce the workload of radiologists. 
Recent advances in report generation using deep learning algorithms have achieved significant results, benefiting from the incorporation of medical knowledge. However, the incorporation of additional knowledge or constraints in existing models often requires either altering network structures or task-specific fine-tuning. In this paper, we propose an energy-based controllable report generation method, named ECRG. Specifically, our method directly utilizes diverse off-the-shelf medical expert models or knowledge to design energy functions, which are integrated into pre-trained report generation models during the inference stage, without any alterations to the network structure or fine-tuning. We also propose an acceleration algorithm to improve the efficiency of sampling the complex multi-modal distribution of report generation. ECRG is model-agnostic and can be readily used for other pre-trained report generation models. Two cases are presented on the design of energy functions tailored to medical expert systems and knowledge. The experiments on the widely used datasets Chest ImaGenome v1.0.0 and MIMIC-CXR demonstrate the effectiveness of our proposed approach.", "title":"Energy-Based Controllable Radiology Report Generation with Medical Knowledge", "authors":[ "Hou, Zeyi", "Yan, Ruixin", "Yan, Ziye", "Lang, Ning", "Zhou, Xiuzhuang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":121 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0245_paper.pdf", "bibtext":"@InProceedings{ He_FRCNet_MICCAI2024,\n author = { He, Along and Li, Tao and Wu, Yanlin and Zou, Ke and Fu, Huazhu },\n title = { { FRCNet: Frequency and Region Consistency for Semi-supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Limited labeled data hinder the application of deep learning in the medical domain. In clinical practice, there are sufficient unlabeled data that are not effectively used, and semi-supervised learning (SSL) is a promising way for leveraging these unlabeled data. However, existing SSL methods ignore frequency-domain and region-level information, which is important for lesion regions located at low frequencies and with significant scale changes. In this paper, we introduce two consistency regularization strategies for semi-supervised medical image segmentation, including frequency domain consistency (FDC) to assist feature learning in the frequency domain and multi-granularity region similarity consistency (MRSC) to perform multi-scale region-level local context information feature learning. With the help of the proposed FDC and MRSC, we can leverage their powerful feature representation capability in an effective and efficient way. Extensive experiments on two medical image segmentation datasets show that our approach achieves large performance gains and exceeds other state-of-the-art methods.
Code will be available.", "title":"FRCNet: Frequency and Region Consistency for Semi-supervised Medical Image Segmentation", "authors":[ "He, Along", "Li, Tao", "Wu, Yanlin", "Zou, Ke", "Fu, Huazhu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":122 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2311_paper.pdf", "bibtext":"@InProceedings{ Kol_MedCLIPSAM_MICCAI2024,\n author = { Koleilat, Taha and Asgariandehkordi, Hojat and Rivaz, Hassan and Xiao, Yiming },\n title = { { MedCLIP-SAM: Bridging Text and Image Towards Universal Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image segmentation of anatomical structures and pathology is crucial in modern clinical diagnosis, disease study, and treatment planning. To date, great progress has been made in deep learning-based segmentation techniques, but most methods still lack data efficiency, generalizability, and interactability. Consequently, the development of new, precise segmentation methods that demand fewer labeled datasets is of utmost importance in medical image analysis. Recently, the emergence of foundation models, such as CLIP and Segment-Anything-Model (SAM), with comprehensive cross-domain representation opened the door for interactive and universal image segmentation. However, exploration of these models for data-efficient medical image segmentation is still limited but is highly necessary. In this paper, we propose a novel framework, called MedCLIP-SAM that combines CLIP and SAM models to generate segmentation of clinical scans using text prompts in both zero-shot and weakly supervised settings. To achieve this, we employed a new Decoupled Hard Negative Noise Contrastive Estimation (DHN-NCE) loss to fine-tune the BiomedCLIP model and the recent gScoreCAM to generate prompts to obtain segmentation masks from SAM in a zero-shot setting. Additionally, we explored the use of zero-shot segmentation labels in a weakly supervised paradigm to improve the segmentation quality further. By extensively testing three diverse segmentation tasks and medical image modalities (breast tumor ultrasound, brain tumor MRI, and lung X-ray), our proposed framework has demonstrated excellent accuracy. 
Code is available at https:\/\/github.com\/HealthX-Lab\/MedCLIP-SAM.", "title":"MedCLIP-SAM: Bridging Text and Image Towards Universal Medical Image Segmentation", "authors":[ "Koleilat, Taha", "Asgariandehkordi, Hojat", "Rivaz, Hassan", "Xiao, Yiming" ], "id":"Conference", "arxiv_id":"2403.20253", "GitHub":[ "https:\/\/github.com\/HealthX-Lab\/MedCLIP-SAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":123 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1340_paper.pdf", "bibtext":"@InProceedings{ Cui_MCAD_MICCAI2024,\n author = { Cui, Jiaqi and Zeng, Xinyi and Zeng, Pinxian and Liu, Bo and Wu, Xi and Zhou, Jiliu and Wang, Yan },\n title = { { MCAD: Multi-modal Conditioned Adversarial Diffusion Model for High-Quality PET Image Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Radiation hazards associated with standard-dose positron emission tomography (SPET) images remain a concern, whereas the quality of low-dose PET (LPET) images fails to meet clinical requirements. Therefore, there is great interest in reconstructing SPET images from LPET images. However, prior studies focus solely on image data, neglecting vital complementary information from other modalities, e.g., patients\u2019 clinical tabular, resulting in compromised reconstruction with limited diagnostic utility. Moreover, they often overlook the semantic consistency between real SPET and reconstructed images, leading to distorted semantic contexts. To tackle these problems, we propose a novel Multi-modal Conditioned Adversarial Diffusion model (MCAD) to reconstruct SPET images from multi-modal inputs, including LPET images and clinical tabular. Specifically, our MCAD incorporates a Multi-modal conditional Encoder (Mc-Encoder) to extract multi-modal features, followed by a conditional diffusion process to blend noise with multi-modal features and gradually map blended features to the target SPET images. To balance multi-modal inputs, the Mc-Encoder embeds Optimal Multi-modal Transport co-Attention (OMTA) to narrow the heterogeneity gap between image and tabular while capturing their interactions, providing sufficient guidance for reconstruction. In addition, to mitigate semantic distortions, we introduce the Multi-Modal Masked Text Reconstruction (M3TRec), which leverages semantic knowledge extracted from denoised PET images to restore the masked clinical tabular, thereby compelling the network to maintain accurate semantics during reconstruction. To expedite the diffusion process, we further introduce an adversarial diffusive network with a reduced number of diffusion steps.
Experiments show that our method achieves the state-of-the-art performance both qualitatively and quantitatively.", "title":"MCAD: Multi-modal Conditioned Adversarial Diffusion Model for High-Quality PET Image Reconstruction", "authors":[ "Cui, Jiaqi", "Zeng, Xinyi", "Zeng, Pinxian", "Liu, Bo", "Wu, Xi", "Zhou, Jiliu", "Wang, Yan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":124 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3386_paper.pdf", "bibtext":"@InProceedings{ Ber_Simulation_MICCAI2024,\n author = { Bergere, Bastien and Dautremer, Thomas and Comtat, Claude },\n title = { { Simulation Based Inference for PET iterative reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"The analytical projector (system matrix) used in most PET reconstructions does not incorporate Compton scattering and other important physical effects that affect the process generating the PET data, which can lead to biases. In our work, we define the projector from the generative model of a Monte-Carlo simulator, which already encompasses many of these effects. Based on the simulator\u2019s implicit distribution, we propose to learn a continuous analytic surrogate for the projector by using a neural density estimator. This avoids the discretization bottleneck associated with direct Monte-Carlo estimation of the PET system matrix, which leads to very high simulation cost. We compare our method with reconstructions using the classical projector, in which corrective terms are factored into a geometrically derived system matrix. Our experiments were carried out in the 2D setting, which enables smaller-scale testing", "title":"Simulation Based Inference for PET iterative reconstruction", "authors":[ "Bergere, Bastien", "Dautremer, Thomas", "Comtat, Claude" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":125 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1441_paper.pdf", "bibtext":"@InProceedings{ Zha_XASim2Real_MICCAI2024,\n author = { Zhang, Baochang and Zhang, Zichen and Liu, Shuting and Faghihroohi, Shahrooz and Schunkert, Heribert and Navab, Nassir },\n title = { { XA-Sim2Real: Adaptive Representation Learning for Vessel Segmentation in X-ray Angiography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate vessel segmentation from X-ray Angiography (XA) is essential for various medical applications, including diagnosis, treatment planning, and image-guided interventions. 
However, learning-based methods face challenges such as inaccurate or insufficient manual annotations, anatomical variability, and data heterogeneity across different medical institutions. In this paper, we propose XA-Sim2Real, a novel adaptive framework for vessel segmentation in XA images. Our approach leverages Digitally Reconstructed Vascular Radiographs (DRVRs) and a two-stage adaptation process to achieve promising segmentation performance on XA images without the need for manual annotations. The first stage involves an XA simulation module for generating realistic simulated XA images from patients\u2019 CT angiography data, providing more accurate vascular shapes and backgrounds than existing curvilinear-structure simulation methods. In the second stage, a novel adaptive representation alignment module addresses data heterogeneity by performing intra-domain adaptation for the complex and diverse nature of XA data in different settings. This module utilizes self-supervised and contrastive learning mechanisms to learn adaptive representations for unlabeled XA images. We extensively evaluate our method on both public and in-house datasets, demonstrating superior performance compared to state-of-the-art self-supervised methods and competitive performance compared to the supervised method.", "title":"XA-Sim2Real: Adaptive Representation Learning for Vessel Segmentation in X-ray Angiography", "authors":[ "Zhang, Baochang", "Zhang, Zichen", "Liu, Shuting", "Faghihroohi, Shahrooz", "Schunkert, Heribert", "Navab, Nassir" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":126 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2279_paper.pdf", "bibtext":"@InProceedings{ Zeh_Rethinking_MICCAI2024,\n author = { Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad },\n title = { { Rethinking Histology Slide Digitization Workflows for Low-Resource Settings } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Histology slide digitization is becoming essential for telepathology (remote consultation), knowledge sharing (education), and using the state-of-the-art artificial intelligence algorithms (augmented\/automated end-to-end clinical workflows). However, the cumulative costs of digital multi-slide high-speed brightfield scanners, cloud\/on-premises storage, and personnel (IT and technicians) make the current slide digitization workflows out-of-reach for limited-resource settings, further widening the health equity gap; even single-slide manual scanning commercial solutions are costly due to hardware requirements (high-resolution cameras, high-spec PC\/workstation, and support for only high-end microscopes). In this work, we present a new cloud slide digitization workflow for creating scanner-quality whole-slide images (WSIs) from uploaded low-quality videos, acquired from inexpensive microscopes with built-in cameras.
Specifically, we present a pipeline to create stitched WSIs while automatically deblurring out-of-focus regions, upsampling input 10X images to 40X resolution, and reducing brightness\/contrast and light-source illumination variations. We demonstrate the WSI creation efficacy from our workflow on World Health Organization-declared neglected tropical disease, Cutaneous Leishmaniasis (prevalent only in the poorest regions of the world and only diagnosed by sub-specialist dermatopathologists, rare in poor countries), as well as other common pathologies on core biopsies of breast, liver, duodenum, stomach and lymph node. Upon acceptance, we will release our code, datasets, pretrained models, and cloud platform for uploading microscope videos and downloading\/viewing WSIs with shareable links (no sign-in required) for telepathology and knowledge sharing.", "title":"Rethinking Histology Slide Digitization Workflows for Low-Resource Settings", "authors":[ "Zehra, Talat", "Marino, Joseph", "Wang, Wendy", "Frantsuzov, Grigoriy", "Nadeem, Saad" ], "id":"Conference", "arxiv_id":"2405.08169", "GitHub":[ "https:\/\/github.com\/nadeemlab\/DeepLIIF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":127 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0721_paper.pdf", "bibtext":"@InProceedings{ Wu_Towards_MICCAI2024,\n author = { Wu, Hong and Fu, Juan and Ye, Hongsheng and Zhong, Yuming and Zou, Xuebin and Zhou, Jianhua and Wang, Yi },\n title = { { Towards Multi-modality Fusion and Prototype-based Feature Refinement for Clinically Significant Prostate Cancer Classification in Transrectal Ultrasound } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Prostate cancer is a highly prevalent cancer and ranks as the second leading cause of cancer-related deaths in men globally. Recently, the utilization of multi-modality transrectal ultrasound (TRUS) has gained significant traction as a valuable technique for guiding prostate biopsies. In this study, we present a novel learning framework for clinically significant prostate cancer (csPCa) classification by using multi-modality TRUS. The proposed framework employs two separate 3D ResNet-50 to extract distinctive features from B-mode and shear wave elastography (SWE). Additionally, an attention module is incorporated to effectively refine B-mode features and aggregate the extracted features from both modalities. Furthermore, we utilize few shot segmentation task to enhance the capacity of the classification encoder. Due to the limited availability of csPCa masks, a prototype correction module is employed to extract representative prototypes of csPCa. The performance of the framework is assessed on a large-scale dataset consisting of 512 TRUS videos with biopsy-proved prostate cancer. The results demonstrate the strong capability in accurately identifying csPCa, achieving an area under the curve (AUC) of 0.86. Moreover, the framework generates visual class activation mapping (CAM), which can serve as valuable assistance for localizing csPCa. 
These CAM images may offer valuable guidance during TRUS-guided targeted biopsies, enhancing the efficacy of the biopsy procedure. The code is available at https:\/\/github.com\/2313595986\/SmileCode.", "title":"Towards Multi-modality Fusion and Prototype-based Feature Refinement for Clinically Significant Prostate Cancer Classification in Transrectal Ultrasound", "authors":[ "Wu, Hong", "Fu, Juan", "Ye, Hongsheng", "Zhong, Yuming", "Zou, Xuebin", "Zhou, Jianhua", "Wang, Yi" ], "id":"Conference", "arxiv_id":"2406.14069", "GitHub":[ "https:\/\/github.com\/2313595986\/SmileCode" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":128 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0262_paper.pdf", "bibtext":"@InProceedings{ Fan_AttentionEnhanced_MICCAI2024,\n author = { Fang, Yuqi and Wang, Wei and Wang, Qianqian and Li, Hong-Jun and Liu, Mingxia },\n title = { { Attention-Enhanced Fusion of Structural and Functional MRI for Analyzing HIV-Associated Asymptomatic Neurocognitive Impairment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Asymptomatic neurocognitive impairment (ANI) is a predominant form of cognitive impairment among individuals infected with human immunodeficiency virus (HIV). The current diagnostic criteria for ANI primarily rely on subjective clinical assessments, possibly leading to different interpretations among clinicians. Some recent studies leverage structural or functional MRI containing objective biomarkers for ANI analysis, offering clinicians companion diagnostic tools. However, they mainly utilize a single imaging modality, neglecting complementary information provided by structural and functional MRI. To this end, we propose an attention-enhanced structural and functional MRI fusion (ASFF) framework for HIV-associated ANI analysis. Specifically, the ASFF first extracts data-driven and human-engineered features from structural MRI, and also captures functional MRI features via a graph isomorphism network and Transformer. A mutual cross-attention fusion module is then designed to model the underlying relationship between structural and functional MRI. Additionally, a semantic inter-modality constraint is introduced to encourage consistency of multimodal features, facilitating effective feature fusion. Experimental results on 137 subjects from an HIV-associated ANI dataset with T1-weighted MRI and resting-state functional MRI show the effectiveness of our ASFF in ANI identification. 
Furthermore, our method can identify both modality-shared and modality-specific brain regions, which may advance our understanding of the structural and functional pathology underlying ANI.", "title":"Attention-Enhanced Fusion of Structural and Functional MRI for Analyzing HIV-Associated Asymptomatic Neurocognitive Impairment", "authors":[ "Fang, Yuqi", "Wang, Wei", "Wang, Qianqian", "Li, Hong-Jun", "Liu, Mingxia" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":129 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0901_paper.pdf", "bibtext":"@InProceedings{ Li_Iterative_MICCAI2024,\n author = { Li, Shuhan and Lin, Yi and Chen, Hao and Cheng, Kwang-Ting },\n title = { { Iterative Online Image Synthesis via Diffusion Model for Imbalanced Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate and robust classification of diseases is important for proper diagnosis and treatment. However, medical datasets often face challenges related to limited sample sizes and inherent imbalanced distributions, due to difficulties in data collection and variations in disease prevalence across different types. In this paper, we introduce an Iterative Online Image Synthesis (IOIS) framework to address the class imbalance problem in medical image classification. Our framework incorporates two key modules, namely Online Image Synthesis (OIS) and Accuracy Adaptive Sampling (AAS), which collectively target the imbalance classification issue at both the instance level and the class level. The OIS module alleviates the data insufficiency problem by generating representative samples tailored for online training of the classifier. On the other hand, the AAS module dynamically balances the synthesized samples among various classes, targeting those with low training accuracy. To evaluate the effectiveness of our proposed method in addressing imbalanced classification, we conduct experiments on the HAM10000 and APTOS datasets. The results obtained demonstrate the superiority of our approach over state-of-the-art methods as well as the effectiveness of each component. 
The source code is available at https:\/\/github.com\/ustlsh\/IOIS_imbalance.", "title":"Iterative Online Image Synthesis via Diffusion Model for Imbalanced Classification", "authors":[ "Li, Shuhan", "Lin, Yi", "Chen, Hao", "Cheng, Kwang-Ting" ], "id":"Conference", "arxiv_id":"2403.08407", "GitHub":[ "https:\/\/github.com\/ustlsh\/IOIS_imbalance" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":130 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1623_paper.pdf", "bibtext":"@InProceedings{ Che_Modeling_MICCAI2024,\n author = { Chen, Aobo and Li, Yangyi and Qian, Wei and Morse, Kathryn and Miao, Chenglin and Huai, Mengdi },\n title = { { Modeling and Understanding Uncertainty in Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image classification is an important task in many different medical applications. The past years have witnessed the success of Deep Neural Networks (DNNs) in medical image classification. However, traditional softmax outputs produced by DNNs fail to estimate uncertainty in medical image predictions. Contrasting with conventional uncertainty estimation approaches, conformal prediction (CP) stands out as a model-agnostic and distribution-free methodology that constructs statistically rigorous uncertainty sets for model predictions. However, existing exact full conformal methods involve retraining the underlying DNN model for each test instance with each possible label, demanding substantial computational resources. Additionally, existing works fail to uncover the root causes of medical prediction uncertainty, making it difficult for doctors to interpret the estimated uncertainties associated with medical diagnoses. To address these challenges, in this paper, we first propose an efficient approximate full CP method, which involves tracking the gradient updates contributed by these samples during training. Subsequently, we design an interpretation method that uses these updates to identify the top-k most influential training samples that significantly impact models\u2019 uncertainties. 
Extensive experiments on real-world medical image datasets are conducted to verify the effectiveness of the proposed methods.", "title":"Modeling and Understanding Uncertainty in Medical Image Classification", "authors":[ "Chen, Aobo", "Li, Yangyi", "Qian, Wei", "Morse, Kathryn", "Miao, Chenglin", "Huai, Mengdi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":131 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4076_paper.pdf", "bibtext":"@InProceedings{ Li_Prediction_MICCAI2024,\n author = { Li, Ganping and Otake, Yoshito and Soufi, Mazen and Masuda, Masachika and Uemura, Keisuke and Takao, Masaki and Sugano, Nobuhiko and Sato, Yoshinobu },\n title = { { Prediction of Disease-Related Femur Shape Changes Using Geometric Encoding and Clinical Context on a Hip Disease CT Database } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"The accurate prediction of femur shape changes due to hip diseases is potentially useful for early diagnosis, treatment planning, and the assessment of disease progression. This study proposes a novel pipeline that leverages geometry encoding and context-awareness mechanisms to predict disease-related femur shape changes. Our method exploits the inherent geometric properties of femurs in CT scans to model and predict alterations in bone structure associated with various hip diseases, such as osteoarthritis (OA). We constructed a database of 367 CT scans from patients with hip OA, annotated using a previously developed bone segmentation model and an automated OA grading system. By combining geometry encoding and clinical context, our model achieves femur surface deformation prediction through implicit geometric and clinical insights, allowing for the detailed modeling of bone geometry variations due to disease progression. Our model demonstrated moderate accuracy in a cross-validation study, with a point-to-face distance (P2F) of 1.545mm on the femoral head, aligning with other advanced predictive methods. 
This work marks a significant step toward personalized hip disease treatment, offering a valuable tool for clinicians and researchers and aiming to enhance patient care outcomes.", "title":"Prediction of Disease-Related Femur Shape Changes Using Geometric Encoding and Clinical Context on a Hip Disease CT Database", "authors":[ "Li, Ganping", "Otake, Yoshito", "Soufi, Mazen", "Masuda, Masachika", "Uemura, Keisuke", "Takao, Masaki", "Sugano, Nobuhiko", "Sato, Yoshinobu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/RIO98\/FemurSurfacePrediction" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":132 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2652_paper.pdf", "bibtext":"@InProceedings{ Don_UncertaintyAware_MICCAI2024,\n author = { Dong, Zhicheng and Yue, Xiaodong and Chen, Yufei and Zhou, Xujing and Liang, Jiye },\n title = { { Uncertainty-Aware Multi-View Learning for Prostate Cancer Grading with DWI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Grading of prostate cancer plays an important role in the planning of surgery and prognosis. Multi-parametric magnetic resonance imaging (mp-MRI) of the prostate can facilitate the detection, localization and grading of prostate cancer. In mp-MRI, Diffusion-Weighted Imaging (DWI) can distinguish a malignant neoplasm from benign prostate tissue due to a significant difference in the apparent diffusion sensitivity coefficient (b-value). DWI using a high b-value is preferred for prostate cancer grading, providing high accuracy despite a decreased signal-to-noise ratio and increased image distortion. On the other hand, a low b-value could avoid confounding pseudo-perfusion effects, but the normal prostate parenchyma then shows a very high signal intensity, making it difficult to distinguish from prostate cancer foci. To fully capitalize on the advantages and information of DWIs with different b-values, we formulate the prostate cancer grading as a multi-view classification problem, treating DWIs with different b-values as distinct views. Multi-view classification aims to integrate views into a unified and comprehensive representation. However, existing multi-view methods cannot quantify the uncertainty of views and lack an interpretable and reliable fusion rule. To tackle this problem, we propose uncertainty-aware multi-view classification with uncertainty-aware belief integration. We measure the uncertainty of DWI based on Evidential Deep Learning and propose a novel strategy of uncertainty-aware belief integration to fuse multiple DWIs based on uncertainty measurements.
Results demonstrate that our method outperforms current multi-view learning methods, showcasing its superior performance.", "title":"Uncertainty-Aware Multi-View Learning for Prostate Cancer Grading with DWI", "authors":[ "Dong, Zhicheng", "Yue, Xiaodong", "Chen, Yufei", "Zhou, Xujing", "Liang, Jiye" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":133 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0480_paper.pdf", "bibtext":"@InProceedings{ Djo_This_MICCAI2024,\n author = { Djoumessi, Kerol and Bah, Bubacarr and Ku\u0308hlewein, Laura and Berens, Philipp and Koch, Lisa },\n title = { { This actually looks like that: Proto-BagNets for local and global interpretability-by-design } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Interpretability is a key requirement for the use of machine learning models in high-stakes applications, including medical diagnosis. Explaining black-box models mostly relies on post-hoc methods that do not faithfully reflect the model\u2019s behavior. As a remedy, prototype-based networks have been proposed, but their interpretability is limited as they have been shown to provide coarse, unreliable, and imprecise explanations. \n In this work, we introduce Proto-BagNets, an interpretable-by-design prototype-based model that combines the advantages of bag-of-local feature models and prototype learning to provide meaningful, coherent, and relevant prototypical parts needed for accurate and interpretable image classification tasks. \n We evaluated the Proto-BagNet for drusen detection on publicly available retinal OCT data. 
The Proto-BagNet performed comparably to the state-of-the-art interpretable and non-interpretable models while providing faithful, accurate, and clinically meaningful local and global explanations.", "title":"This actually looks like that: Proto-BagNets for local and global interpretability-by-design", "authors":[ "Djoumessi, Kerol", "Bah, Bubacarr", "Ku\u0308hlewein, Laura", "Berens, Philipp", "Koch, Lisa" ], "id":"Conference", "arxiv_id":"2406.15168", "GitHub":[ "https:\/\/github.com\/kdjoumessi\/Proto-BagNets" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":134 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1263_paper.pdf", "bibtext":"@InProceedings{ Han_InterIntra_MICCAI2024,\n author = { Han, Xiangmin and Xue, Rundong and Du, Shaoyi and Gao, Yue },\n title = { { Inter-Intra High-Order Brain Network for ASD Diagnosis via Functional MRIs } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Currently in the field of computer-aided diagnosis, graph or hypergraph-based methods are widely used in the diagnosis of neurological diseases.\nHowever, existing graph-based work primarily focuses on pairwise correlations, neglecting high-order correlations. Additionally, existing hypergraph methods can only explore the commonality of high-order representations at a single scale, resulting in the lack of a framework that can integrate multi-scale high-order correlations. To address the above issues, we propose an Inter-Intra High-order Brain Network (I2HBN) framework for ASD-assisted diagnosis, which is divided into two parts: intra-hypergraph computation and inter-hypergraph computation. \nSpecifically, the intra-hypergraph computation employs the hypergraph to represent high-order correlations among different brain regions based on fMRI signal, generating intra-embeddings and intra-results. Subsequently, inter-hypergraph computation utilizes these intra-embeddings as features of inter-vertices to model inter-hypergraph that captures the inter-correlations among individuals at the population level. Finally, the intra-results and the inter-results are weighted to perform brain disease diagnosis. 
We demonstrate the potential of this method on two ABIDE datasets (NYU and UCLA); the results show that the proposed method achieves superior ASD diagnosis performance compared with existing state-of-the-art methods.", "title":"Inter-Intra High-Order Brain Network for ASD Diagnosis via Functional MRIs", "authors":[ "Han, Xiangmin", "Xue, Rundong", "Du, Shaoyi", "Gao, Yue" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":135 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2444_paper.pdf", "bibtext":"@InProceedings{ Yim_DermaVQA_MICCAI2024,\n author = { Yim, Wen-wai and Fu, Yujuan and Sun, Zhaoyi and Ben Abacha, Asma and Yetisgen, Meliha and Xia, Fei },\n title = { { DermaVQA: A Multilingual Visual Question Answering Dataset for Dermatology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Remote medical care has become commonplace with the establishment of patient portals, the maturation of web technologies, and the proliferation of personal devices. However, though on-demand care provides convenience and expands patient access, this same phenomenon may lead to increased workload for healthcare providers. Drafting candidate responses may help speed up physician workflows answering electronic messages. One specialty that may benefit from the latest multi-modal vision-language foundational models is dermatology. However, there is no existing dataset that incorporates dermatological health queries along with user-generated images. In this work, we contribute a new dataset, DermaVQA(https:\/\/osf.io\/72rp3\/), for the task of dermatology question answering and we benchmark the performance of state-of-the-art multi-modal models on multilingual response generation using relevant multi-reference metrics.
The dataset and corresponding code are available on our project\u2019s GitHub repository (https:\/\/github.com\/velvinnn\/DermaVQA).", "title":"DermaVQA: A Multilingual Visual Question Answering Dataset for Dermatology", "authors":[ "Yim, Wen-wai", "Fu, Yujuan", "Sun, Zhaoyi", "Ben Abacha, Asma", "Yetisgen, Meliha", "Xia, Fei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/velvinnn\/DermaVQA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":136 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2521_paper.pdf", "bibtext":"@InProceedings{ Hua_DESSAM_MICCAI2024,\n author = { Huang, Lina and Liang, Yixiong and Liu, Jianfeng },\n title = { { DES-SAM: Distillation-Enhanced Semantic SAM for Cervical Nuclear Segmentation with Box Annotation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Nuclei segmentation in cervical cell images is a crucial technique for the automatic diagnosis of cervical cell pathology. The current state-of-the-art (SOTA) nuclei segmentation methods often require significant time and resources to provide pixel-level annotations for training. To reduce the labor-intensive annotation costs, we propose DES-SAM, a box-supervised cervical nucleus segmentation network with strong generalization ability based on self-distillation prompting. We utilize Segment Anything Model (SAM) to generate high-quality pseudo-labels by integrating a lightweight detector. The main challenges lie in the poor generalization ability brought by small-scale training datasets and the large-scale training parameters of traditional knowledge distillation frameworks. To address these challenges, we propose leveraging the strong feature extraction ability of SAM and a self-distillation prompting strategy to maximize the performance of the downstream nuclear semantic segmentation task without compromising SAM\u2019s generalization. Additionally, we propose an Edge-aware Enhanced Loss to improve the segmentation capability of DES-SAM. 
Various comparative and generalization experiments on public cervical cell nuclei datasets demonstrate the effectiveness of the proposed method.", "title":"DES-SAM: Distillation-Enhanced Semantic SAM for Cervical Nuclear Segmentation with Box Annotation", "authors":[ "Huang, Lina", "Liang, Yixiong", "Liu, Jianfeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CVIU-CSU\/DES-SAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":137 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0040_paper.pdf", "bibtext":"@InProceedings{ Xie_MHpFLGB_MICCAI2024,\n author = { Xie, Luyuan and Lin, Manqing and Xu, ChenMing and Luan, Tianyu and Zeng, Zhipeng and Qian, Wenjun and Li, Cong and Fang, Yuejian and Shen, Qingni and Wu, Zhonghai },\n title = { { MH-pFLGB: Model Heterogeneous personalized Federated Learning via Global Bypass for Medical Image Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the evolving application of medical artificial intelligence, federated learning stands out for its capacity to protect the privacy of training data, enabling collaborative model development without sharing local data from healthcare entities. However, the heterogeneity of data and systems across institutions presents significant challenges, undermining the efficiency of federated learning and the exchange of information between clients. To address these issues, we introduce a novel approach, MH-pFLGB, which employs a global bypass strategy to mitigate the reliance on public datasets and navigate the complexities of non-IID data distributions. Our method enhances traditional federated learning by integrating a global bypass model, which would share the information among the client, but also serves as part of the network to enhance the performance on each client. Additionally, \\model provides a feature fusion module to better combine the local and global features. We validate MH-pFLGB\u2019s effectiveness and adaptability through extensive testing on different medical tasks, demonstrating superior performance compared to existing state-of-the-art methods.", "title":"MH-pFLGB: Model Heterogeneous personalized Federated Learning via Global Bypass for Medical Image Analysis", "authors":[ "Xie, Luyuan", "Lin, Manqing", "Xu, ChenMing", "Luan, Tianyu", "Zeng, Zhipeng", "Qian, Wenjun", "Li, Cong", "Fang, Yuejian", "Shen, Qingni", "Wu, Zhonghai" ], "id":"Conference", "arxiv_id":"2407.00474", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":138 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0286_paper.pdf", "bibtext":"@InProceedings{ Wan_LKMUNet_MICCAI2024,\n author = { Wang, Jinhong and Chen, Jintai and Chen, Danny Z. 
and Wu, Jian },\n title = { { LKM-UNet: Large Kernel Vision Mamba UNet for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"In clinical practice, medical image segmentation provides useful information on the contours and dimensions of target organs or tissues, facilitating improved diagnosis, analysis, and treatment. In the past few years, convolutional neural networks (CNNs) and Transformers have dominated this area, but they still suffer from either limited receptive fields or costly long-range modeling. Mamba, a State Space Sequence Model (SSM), recently emerged as a promising paradigm for long-range dependency modeling with linear complexity. In this paper, we introduce a Large Kernel vision Mamba U-shape Network, or LKM-UNet, for medical image segmentation. A distinguishing feature of our LKM-UNet is its utilization of large Mamba kernels, excelling in locally spatial modeling compared to small kernel-based CNNs and Transformers, while maintaining superior efficiency in global modeling compared to self-attention with quadratic complexity. Additionally, we design a novel hierarchical and bidirectional Mamba block to further enhance Mamba\u2019s global and neighborhood spatial modeling capability for vision inputs. Comprehensive experiments demonstrate the feasibility and the effectiveness of using large-size Mamba kernels to achieve large receptive fields. Codes are available at https:\/\/github.com\/wjh892521292\/LKM-UNet.", "title":"LKM-UNet: Large Kernel Vision Mamba UNet for Medical Image Segmentation", "authors":[ "Wang, Jinhong", "Chen, Jintai", "Chen, Danny Z.", "Wu, Jian" ], "id":"Conference", "arxiv_id":"2403.07332", "GitHub":[ "https:\/\/github.com\/wjh892521292\/LKM-UNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":139 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1552_paper.pdf", "bibtext":"@InProceedings{ Gao_Improving_MICCAI2024,\n author = { Gao, Yuan and Zhou, Hong-Yu and Wang, Xin and Zhang, Tianyu and Han, Luyi and Lu, Chunyao and Liang, Xinglong and Teuwen, Jonas and Beets-Tan, Regina and Tan, Tao and Mann, Ritse },\n title = { { Improving Neoadjuvant Therapy Response Prediction by Integrating Longitudinal Mammogram Generation with Cross-Modal Radiological Reports: A Vision-Language Alignment-guided Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Longitudinal imaging examinations are vital for predicting pathological complete response (pCR) to neoadjuvant therapy (NAT) by assessing changes in tumor size and density. However, quite-often the imaging modalities at different time points during NAT may differ from patients, hindering comprehensive treatment response estimation when utilizing multi-modal information. This may result in underestimation or overestimation of disease status. 
Also, existing longitudinal image generation models mainly rely on raw-pixel inputs while rarely exploring integration with practical longitudinal radiology reports, which can convey valuable temporal content on disease remission or progression. Further, extracting textually aligned dynamic information from longitudinal images poses a challenge. To address these issues, we propose a longitudinal image-report alignment-guided model for longitudinal mammogram generation using cross-modality radiology reports. We utilize generated mammograms to compensate for absent mammograms in our pCR prediction pipeline. Our experimental results achieve performance comparable to the theoretical upper bound, therefore providing a potential 3-month window for therapeutic replacement. The code will be accessible to the public.", "title":"Improving Neoadjuvant Therapy Response Prediction by Integrating Longitudinal Mammogram Generation with Cross-Modal Radiological Reports: A Vision-Language Alignment-guided Model", "authors":[ "Gao, Yuan", "Zhou, Hong-Yu", "Wang, Xin", "Zhang, Tianyu", "Han, Luyi", "Lu, Chunyao", "Liang, Xinglong", "Teuwen, Jonas", "Beets-Tan, Regina", "Tan, Tao", "Mann, Ritse" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/yawwG\/LIMRA\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":140 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3797_paper.pdf", "bibtext":"@InProceedings{ Kas_IHRRBDINO_MICCAI2024,\n author = { Kasem, Mahmoud SalahEldin and Abdallah, Abdelrahman and Abdelhalim, Ibrahim and Alghamdi, Norah Saleh and Contractor, Sohail and El-Baz, Ayman },\n title = { { IHRRB-DINO: Identifying High-Risk Regions of Breast Masses in Mammogram Images Using Data-Driven Instance Noise (DINO) } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper, we introduce IHRRB-DINO, an advanced model designed to assist radiologists in effectively detecting breast masses in mammogram images. This tool is specifically engineered to highlight high-risk regions, enhancing the capability of radiologists in identifying breast masses for more accurate and efficient assessments. Our approach incorporates a novel technique that employs Data-Driven Instance Noise (DINO) for Object Localization, which significantly improves breast mass localization. This method is augmented by data augmentation using instance-level noise during the training phase, focusing on refining the model\u2019s proficiency in precisely localizing breast masses in mammographic images. Rigorous testing and validation conducted on the BI-RADS dataset using our model, especially with the Swin-L backbone, have demonstrated promising results. We achieved an Average Precision (AP) of 46.96, indicating a substantial improvement in the accuracy and consistency of breast cancer (BC) detection and localization.
These results underscore the potential of IHRRB-DINO in contributing to the advancements in computer-aided diagnosis systems for breast cancer, marking a significant stride in the field of medical imaging technology.", "title":"IHRRB-DINO: Identifying High-Risk Regions of Breast Masses in Mammogram Images Using Data-Driven Instance Noise (DINO)", "authors":[ "Kasem, Mahmoud SalahEldin", "Abdallah, Abdelrahman", "Abdelhalim, Ibrahim", "Alghamdi, Norah Saleh", "Contractor, Sohail", "El-Baz, Ayman" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":141 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3513_paper.pdf", "bibtext":"@InProceedings{ Tiv_Hallucination_MICCAI2024,\n author = { Tivnan, Matthew and Yoon, Siyeop and Chen, Zhennong and Li, Xiang and Wu, Dufan and Li, Quanzheng },\n title = { { Hallucination Index: An Image Quality Metric for Generative Reconstruction Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Generative image reconstruction algorithms such as measurement conditioned diffusion models are increasingly popular in the field of medical imaging. These powerful models can transform low signal-to-noise ratio (SNR) inputs into outputs with the appearance of high SNR. However, the outputs can have a new type of error called hallucinations. In medical imaging, these hallucinations may not be obvious to a Radiologist but could cause diagnostic errors. Generally, hallucination refers to error in estimation of object structure caused by a machine learning model, but there is no widely accepted method to evaluate hallucination magnitude. In this work, we propose a new image quality metric called the hallucination index. Our approach is to compute the Hellinger distance from the distribution of reconstructed images to a zero hallucination reference distribution. To evaluate our approach, we conducted a numerical experiment with electron microcopy images, simulated noisy measurements, and applied diffusion based reconstructions. We sampled the measurements and the generative reconstructions repeatedly to compute the sample mean and covariance. For the zero hallucination reference, we used the forward diffusion process applied to ground truth. Our results show that higher measurement SNR leads to lower hallucination index for the same apparent image quality. We also evaluated the impact of early stopping in the reverse diffusion process and found that more modest denoising strengths can reduce hallucination. 
We believe this metric could be useful for evaluation of generative image reconstructions or as a warning label to inform radiologists about the degree of hallucinations in medical images.", "title":"Hallucination Index: An Image Quality Metric for Generative Reconstruction Models", "authors":[ "Tivnan, Matthew", "Yoon, Siyeop", "Chen, Zhennong", "Li, Xiang", "Wu, Dufan", "Li, Quanzheng" ], "id":"Conference", "arxiv_id":"2407.12780", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":142 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1816_paper.pdf", "bibtext":"@InProceedings{ Zha_Spatialaware_MICCAI2024,\n author = { Zhang, Zerui and Sun, Zhichao and Liu, Zelong and Zhao, Zhou and Yu, Rui and Du, Bo and Xu, Yongchao },\n title = { { Spatial-aware Attention Generative Adversarial Network for Semi-supervised Anomaly Detection in Medical Image } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical anomaly detection is a critical research area aimed at recognizing abnormal images to aid in diagnosis. Most existing methods adopt synthetic anomalies and image restoration on normal samples to detect anomalies. The unlabeled data consisting of both normal and abnormal data is not well explored. We introduce a novel Spatial-aware Attention Generative Adversarial Network (SAGAN) for one-class semi-supervised generation of health images. Our core insight is the utilization of position encoding and attention to accurately focus on restoring abnormal regions and preserving normal regions. To fully utilize the unlabelled data, SAGAN relaxes the cyclic consistency requirement of the existing unpaired image-to-image conversion methods, and generates high-quality health images corresponding to unlabeled data, guided by the reconstruction of normal images and restoration of pseudo-anomaly images. Subsequently, the discrepancy between the generated healthy image and the original image is utilized as an anomaly score. Extensive experiments on three medical datasets demonstrate that the proposed SAGAN outperforms the state-of-the-art methods.
Code is available at https:\/\/github.com\/zzr728\/SAGAN", "title":"Spatial-aware Attention Generative Adversarial Network for Semi-supervised Anomaly Detection in Medical Image", "authors":[ "Zhang, Zerui", "Sun, Zhichao", "Liu, Zelong", "Zhao, Zhou", "Yu, Rui", "Du, Bo", "Xu, Yongchao" ], "id":"Conference", "arxiv_id":"2405.12872", "GitHub":[ "https:\/\/github.com\/zzr728\/SAGAN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":143 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0562_paper.pdf", "bibtext":"@InProceedings{ Che_LUCIDA_MICCAI2024,\n author = { Chen, Yixin and Meng, Xiangxi and Wang, Yan and Zeng, Shuang and Liu, Xi and Xie, Zhaoheng },\n title = { { LUCIDA: Low-dose Universal-tissue CT Image Domain Adaptation For Medical Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate segmentation in low-dose CT scans remains a challenge in medical imaging, primarily due to the high annotation costs. This study introduces LUCIDA, a Low-dose Universal-tissue CT Image Domain Adaptation model operating under an unsupervised protocol without requiring LDCT annotations. It uniquely incorporates the Weighted Segmentation Reconstruction (WSR) module to establish a linear relationship between prediction maps and reconstructed images. By enhancing the quality of reconstructed images, LUCIDA improves the accuracy of prediction maps, facilitating a new domain adaptation framework. Extensive evaluation experiments demonstrate LUCIDA\u2019s effectiveness in accurately recognizing a wide range of tissues, significantly outperforming traditional methods. We also introduce the LUCIDA-Ensemble model, demonstrating comparable performance to supervised learning models in organ segmentation and recognizing 112 tissue types.", "title":"LUCIDA: Low-dose Universal-tissue CT Image Domain Adaptation For Medical Segmentation", "authors":[ "Chen, Yixin", "Meng, Xiangxi", "Wang, Yan", "Zeng, Shuang", "Liu, Xi", "Xie, Zhaoheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/YixinChen-AI\/LUCIDA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":144 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0293_paper.pdf", "bibtext":"@InProceedings{ Li_PRISM_MICCAI2024,\n author = { Li, Hao and Liu, Han and Hu, Dewei and Wang, Jiacheng and Oguz, Ipek },\n title = { { PRISM: A Promptable and Robust Interactive Segmentation Model with Visual Prompts } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper, we present PRISM, a Promptable and Robust Interactive Segmentation Model, aiming for precise segmentation of 3D medical images. 
PRISM accepts various visual inputs, including points, boxes, and scribbles as sparse prompts, as well as masks as dense prompts. Specifically, PRISM is designed with four principles to achieve robustness: (1) Iterative learning. The model produces segmentations by using visual prompts from previous iterations to achieve progressive improvement. (2) Confidence learning. PRISM employs multiple segmentation heads per input image, each generating a candidate mask and a confidence score to optimize predictions. (3) Corrective learning. Following each segmentation iteration, PRISM employs a shallow corrective refinement network to reassign mislabeled voxels. (4) Hybrid design. PRISM integrates hybrid encoders to better capture both the local and global information. Comprehensive validation of PRISM is conducted using four public datasets for tumor segmentation in the colon, pancreas, liver, and kidney, highlighting challenges caused by anatomical variations and ambiguous boundaries in accurate tumor identification. Compared to state-of-the-art methods, both with and without prompt engineering, PRISM significantly improves performance, achieving results that are close to human levels.", "title":"PRISM: A Promptable and Robust Interactive Segmentation Model with Visual Prompts", "authors":[ "Li, Hao", "Liu, Han", "Hu, Dewei", "Wang, Jiacheng", "Oguz, Ipek" ], "id":"Conference", "arxiv_id":"2404.15028", "GitHub":[ "https:\/\/github.com\/MedICL-VU\/PRISM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":145 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0014_paper.pdf", "bibtext":"@InProceedings{ Li_TPDRSeg_MICCAI2024,\n author = { Li, Wenxue and Xiong, Xinyu and Xia, Peng and Ju, Lie and Ge, Zongyuan },\n title = { { TP-DRSeg: Improving Diabetic Retinopathy Lesion Segmentation with Explicit Text-Prompts Assisted SAM } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advances in large foundation models, such as the Segment Anything Model (SAM), have demonstrated considerable promise across various tasks. Despite their progress, these models still encounter challenges in specialized medical image analysis, especially in recognizing subtle inter-class differences in Diabetic Retinopathy (DR) lesion segmentation. In this paper, we propose a novel framework that customizes SAM for text-prompted DR lesion segmentation, termed TP-DRSeg. Our core idea involves exploiting language cues to inject medical prior knowledge into the vision-only segmentation network, thereby combining the advantages of different foundation models and enhancing the credibility of segmentation. Specifically, to unleash the potential of vision-language models in the recognition of medical concepts, we propose an explicit prior encoder that transfers implicit medical concepts into explicit prior knowledge, providing explainable clues to excavate low-level features associated with lesions. 
Furthermore, we design a prior-aligned injector to inject explicit priors into the segmentation process, which can facilitate knowledge sharing across multi-modality features and allow our framework to be trained in a parameter-efficient fashion. Experimental results demonstrate the superiority of our framework over other traditional models and foundation model variants. The code implementations are accessible at https:\/\/github.com\/wxliii\/TP-DRSeg.", "title":"TP-DRSeg: Improving Diabetic Retinopathy Lesion Segmentation with Explicit Text-Prompts Assisted SAM", "authors":[ "Li, Wenxue", "Xiong, Xinyu", "Xia, Peng", "Ju, Lie", "Ge, Zongyuan" ], "id":"Conference", "arxiv_id":"2406.15764", "GitHub":[ "https:\/\/github.com\/wxliii\/TP-DRSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":146 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2633_paper.pdf", "bibtext":"@InProceedings{ She_GCAN_MICCAI2024,\n author = { Shen, Xiongri and Song, Zhenxi and Zhang, Zhiguo },\n title = { { GCAN: Generative Counterfactual Attention-guided Network for Explainable Cognitive Decline Diagnostics based on fMRI Functional Connectivity } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diagnosis of mild cognitive impairment (MCI) and subjective cognitive decline (SCD) from fMRI functional connectivity (FC) has gained popularity, but most FC-based diagnostic models are black boxes lacking causal reasoning so they contribute little to the knowledge about FC-based neural biomarkers of cognitive decline. To enhance the explainability of diagnostic models, we propose a generative counterfactual attention-guided network (GCAN), which introduces counterfactual reasoning to recognize cognitive decline-related brain regions and then uses these regions as attention maps to boost the prediction performance of diagnostic models. Furthermore, to tackle the difficulty in the generation of highly-structured and brain-atlas constrained FC, which is essential in counterfactual reasoning, an Atlas-Aware Bidirectional Transformer (AABT) method is developed. AABT employs a bidirectional strategy to encode and decode the tokens from each network of the brain atlas, thereby enhancing the generation of high-quality target label FC. In experiments on in-house and public datasets, the generated attention maps closely resemble FC changes in the literature on neurodegenerative diseases. The diagnostic performance is also superior to baseline and SOTA models.
The code is available at https:\/\/anonymous.4open.science\/status\/GCAN-665C.", "title":"GCAN: Generative Counterfactual Attention-guided Network for Explainable Cognitive Decline Diagnostics based on fMRI Functional Connectivity", "authors":[ "Shen, Xiongri", "Song, Zhenxi", "Zhang, Zhiguo" ], "id":"Conference", "arxiv_id":"2403.01758", "GitHub":[ "https:\/\/github.com\/SXR3015\/GCAN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":147 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1516_paper.pdf", "bibtext":"@InProceedings{ Zin_Towards_MICCAI2024,\n author = { Zinsou, Kp\u00eatch\u00e9hou\u00e9 Merveille Santi and Diop, Cheikh Talibouya and Diop, Idy and Tsirikoglou, Apostolia and Siddig, Emmanuel Edwar and Sow, Doudou and Ndiaye, Maodo },\n title = { { Towards Rapid Mycetoma Species Diagnosis: A Deep Learning Approach for Stain-Invariant Classification on H&E Images from Senegal } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Mycetoma, categorized as a Neglected Tropical Disease (NTD), poses significant health, social, and economic challenges due to its causative agents, which include both bacterial and fungal pathogens. Accurate identification of the mycetoma type and species is crucial for initiating appropriate medical interventions, as treatment strategies vary widely. Although several diagnostic tools have been developed over time, histopathology remains one of the most widely used methods due to its speed, cost-effectiveness and simplicity. However, it relies on expert pathologists to perform the diagnostic procedure and accurately interpret the results, which is particularly challenging in resource-limited settings.\nAdditionally, pathologists face the challenge of stain variability during histopathological analyses on slides.\nIn response to this need, this study pioneers an automated approach to mycetoma species identification using histopathological images from black skin patients in Senegal. Integrating various stain normalization techniques such as Macenko, Vahadane, and Reinhard to mitigate color variations, we combine these methods with the MONAI framework alongside the DenseNet121 architecture. Our system achieves average accuracies of 99.34%, 94.06%, and 94.45% on the Macenko, Reinhard and Vahadane datasets, respectively.
The system is trained using an original dataset comprising histopathological images stained with Hematoxylin and Eosin (H&E), meticulously collected, annotated, and labeled from various hospitals across Senegal.\nThis study represents a significant advancement in the field of mycetoma diagnosis, offering a reliable and efficient solution that can facilitate timely and accurate species identification, particularly in endemic regions like Senegal.", "title":"Towards Rapid Mycetoma Species Diagnosis: A Deep Learning Approach for Stain-Invariant Classification on H E Images from Senegal", "authors":[ "Zinsou, Kp\u00eatch\u00e9hou\u00e9 Merveille Santi", "Diop, Cheikh Talibouya", "Diop, Idy", "Tsirikoglou, Apostolia", "Siddig, Emmanuel Edwar", "Sow, Doudou", "Ndiaye, Maodo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":148 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0663_paper.pdf", "bibtext":"@InProceedings{ Xin_SegMamba_MICCAI2024,\n author = { Xing, Zhaohu and Ye, Tian and Yang, Yijun and Liu, Guang and Zhu, Lei },\n title = { { SegMamba: Long-range Sequential Modeling Mamba For 3D Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"The Transformer architecture has demonstrated remarkable results in 3D medical image segmentation due to its capability of modeling global relationships.\nHowever, it poses a significant computational burden when processing high-dimensional medical images.\nMamba, as a State Space Model (SSM), has recently emerged as a notable approach for modeling long-range dependencies in sequential data, and has excelled in the field of natural language processing with its remarkable memory efficiency and computational speed.\nInspired by this, we devise \\textbf{SegMamba}, a novel 3D medical image \\textbf{Seg}mentation \\textbf{Mamba} model, to effectively capture long-range dependencies within whole-volume features at every scale.\nOur SegMamba outperforms Transformer-based methods in whole-volume feature modeling, maintaining high efficiency even at a resolution of {$64\\times 64\\times 64$}, where the sequential length is approximately 260k.\nMoreover, we collect and annotate a novel large-scale dataset (named CRC-500) to facilitate benchmarking evaluation in 3D colorectal cancer (CRC) segmentation.\nExperimental results on our CRC-500 and two public benchmark datasets further demonstrate the effectiveness and universality of our method.", "title":"SegMamba: Long-range Sequential Modeling Mamba For 3D Medical Image Segmentation", "authors":[ "Xing, Zhaohu", "Ye, Tian", "Yang, Yijun", "Liu, Guang", "Zhu, Lei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ge-xing\/segmamba" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":149 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0253_paper.pdf", 
"bibtext":"@InProceedings{ Liu_PEPSI_MICCAI2024,\n author = { Liu, Peirong and Puonti, Oula and Sorby-Adams, Annabel and Kimberly, W. Taylor and Iglesias, Juan E. },\n title = { { PEPSI: Pathology-Enhanced Pulse-Sequence-Invariant Representations for Brain MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Remarkable progress has been made by data-driven machine-learning methods in the analysis of MRI scans. However, most existing MRI analysis approaches are crafted for specific MR pulse sequences (MR contrasts) and usually require nearly isotropic acquisitions. This limits their applicability to the diverse, real-world clinical data, where scans commonly exhibit variations in appearances due to being obtained with varying sequence parameters, resolutions, and orientations \u2013 especially in the presence of pathology. In this paper, we propose PEPSI, the first pathology-enhanced, and pulse-sequence-invariant feature representation learning model for brain MRI. PEPSI is trained entirely on synthetic images with a novel pathology encoding strategy, and enables co-training across datasets with diverse pathologies and missing modalities. Despite variations in pathology appearances across different MR pulse sequences or the quality of acquired images (e.g., resolution, orientation, artifacts, etc), PEPSI produces a high-resolution image of reference contrast (MP-RAGE) that captures anatomy, along with an image specifically highlighting the pathology. Our experiments demonstrate PEPSI\u2019s remarkable capability for image synthesis compared with the state-of-the-art, contrast-agnostic synthesis models, as it accurately reconstructs anatomical structures while differentiating between pathology and normal tissue. We further illustrate the efficiency and effectiveness of PEPSI features for downstream pathology segmentation on five public datasets covering white matter hyperintensities and stroke lesions.", "title":"PEPSI: Pathology-Enhanced Pulse-Sequence-Invariant Representations for Brain MRI", "authors":[ "Liu, Peirong", "Puonti, Oula", "Sorby-Adams, Annabel", "Kimberly, W. Taylor", "Iglesias, Juan E." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/peirong26\/PEPSI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":150 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2373_paper.pdf", "bibtext":"@InProceedings{ Zha_See_MICCAI2024,\n author = { Zhao, Ziyuan and Fang, Fen and Yang, Xulei and Xu, Qianli and Guan, Cuntai and Zhou, S. Kevin },\n title = { { See, Predict, Plan: Diffusion for Procedure Planning in Robotic Surgical Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automatic surgical video analysis is pivotal in enhancing the effectiveness and safety of robot-assisted minimally invasive surgery. 
This study introduces a novel procedure planning task aimed at predicting target-conditioned actions in surgical videos to achieve desired visual goals, thereby addressing the question of ``What to do to achieve a desired visual goal?\u201d. Leveraging recent advancements in deep learning, particularly diffusion models, our work proposes the Multi-Scale Phase-Condition Diffusion (MS-PCD) framework. This innovative approach incorporates multi-scale visual features into the diffusion process, conditioned by phase class, to generate goal-conditioned plans. By cascading multiple diffusion models with inputs at different scales, MS-PCD adaptively extracts fine-grained visual features, significantly enhancing procedure planning performance in unstructured robotic surgical videos. We establish a new benchmark for procedure planning in robotic surgical videos using the publicly available PSI-AVA dataset, demonstrating that our method notably outperforms existing baselines on several metrics. Our research not only presents an innovative approach to surgical video analysis but also opens new avenues for automation in surgical procedures, contributing to both patient safety and surgical training.", "title":"See, Predict, Plan: Diffusion for Procedure Planning in Robotic Surgical Videos", "authors":[ "Zhao, Ziyuan", "Fang, Fen", "Yang, Xulei", "Xu, Qianli", "Guan, Cuntai", "Zhou, S. Kevin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":151 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3315_paper.pdf", "bibtext":"@InProceedings{ Car_Characterizing_MICCAI2024,\n author = { Carrera-Pinz\u00f3n, Andr\u00e9s Felipe and Toro-Quitian, Leonard and Torres, Juan Camilo and Cer\u00f3n, Alexander and Sarmiento, Wils\u00f3n and Mendez-Toro, Arnold and Cruz-Roa, Angel and Guti\u00e9rrez-Carvajal, R. E. and \u00d3rtiz-Davila, Carlos and Gonz\u00e1lez, Fabio and Romero, Eduardo and Iregui Guerrero, Marcela },\n title = { { Characterizing the left ventricular ultrasound dynamics in the frequency domain to estimate the cardiac function } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Assessment of cardiac function typically relies on the Left Ventricular Ejection Fraction (LVEF), i.e., the ratio between diastolic and systolic volumes. However, inconsistent LVEF values have been reported in many clinic situations. This study introduces a novel approach to quantify the cardiac function by analyzing the frequency patterns of the segmented Left Ventricle (LV) along the entire cardiac cycle in the four-chamber-image of echocardiography videos. After automatic segmentation of the left ventricle, the area is computed during a complete cycle and the obtained signal is transformed to the frequency space. A soft clustering of the spectrum magnitude was performed with 7.835 cases from the EchoNet-dynamic open database by applying spectral clustering with Euclidean distance and eigengap heuristics to obtain four dense groups. 
Once groups were set, the medoid of each was used as the representative, and for a set of 99 test cases from a local collection with different underlying pathologies, the magnitude distance to the medoid was replaced by the norm of the sum of vectors representing both the medoid and a particular case, making an angle estimated from the dot product between the temporal signals obtained from the inverse Fourier transform of the spectrum phase of each, and a constant magnitude. Results show the four clusters characterize different types of patterns, and while LVEF was usually spread within clusters and mixed up the clinical condition, the new indicator showed a narrow progression consistent with the particular pathology degree.", "title":"Characterizing the left ventricular ultrasound dynamics in the frequency domain to estimate the cardiac function", "authors":[ "Carrera-Pinz\u00f3n, Andr\u00e9s Felipe", "Toro-Quitian, Leonard", "Torres, Juan Camilo", "Cer\u00f3n, Alexander", "Sarmiento, Wils\u00f3n", "Mendez-Toro, Arnold", "Cruz-Roa, Angel", "Guti\u00e9rrez-Carvajal, R. E.", "\u00d3rtiz-Davila, Carlos", "Gonz\u00e1lez, Fabio", "Romero, Eduardo", "Iregui Guerrero, Marcela" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":152 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1997_paper.pdf", "bibtext":"@InProceedings{ Gua_Labelguided_MICCAI2024,\n author = { Guan, Jiale and Zou, Xiaoyang and Tao, Rong and Zheng, Guoyan },\n title = { { Label-guided Teacher for Surgical Phase Recognition via Knowledge Distillation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automatic surgical phase recognition plays an essential role in developing advanced, context-aware, computer-assisted intervention systems. Knowledge distillation is an effective framework to transfer knowledge from a teacher network to a student network, which has been used to solve the challenging surgical phase recognition task. A key to successful knowledge distillation is to learn a better teacher network. To this end, we propose a novel label-guided teacher network for knowledge distillation. Specifically, our teacher network takes both video frames and ground-truth labels as input. Instead of only using labels to supervise the final predictions, we additionally introduce two types of label guidance to learn a better teacher: 1) we propose label embedding-frame feature cross-attention transformer blocks for feature enhancement; and 2) we propose to use label information to sample positive (from the same phase) and negative features (from different phases) in a supervised contrastive learning framework to learn better feature embeddings. Then, by minimizing feature similarity, the knowledge learnt by our teacher network is effectively distilled into a student network. At the inference stage, the distilled student network can perform accurate surgical phase recognition taking only video frames as input.
Comprehensive experiments are conducted on two laparoscopic cholecystectomy video datasets to validate the proposed method, offering an accuracy of 93.3\u00b15.8 on the Cholec80 dataset and an accuracy of 91.6\u00b19.1 on the M2cai16 dataset.", "title":"Label-guided Teacher for Surgical Phase Recognition via Knowledge Distillation", "authors":[ "Guan, Jiale", "Zou, Xiaoyang", "Tao, Rong", "Zheng, Guoyan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":153 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0405_paper.pdf", "bibtext":"@InProceedings{ Ouy_SOM2LM_MICCAI2024,\n author = { Ouyang, Jiahong and Zhao, Qingyu and Adeli, Ehsan and Zaharchuk, Greg and Pohl, Kilian M. },\n title = { { SOM2LM: Self-Organized Multi-Modal Longitudinal Maps } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Neuroimage modalities acquired by longitudinal studies often provide complementary information regarding disease progression. For example, amyloid PET visualizes the build-up of amyloid plaques that appear in earlier stages of Alzheimer\u2019s disease (AD), while structural MRIs depict brain atrophy appearing in the later stages of the disease. To accurately model multi-modal longitudinal data, we propose an interpretable self-supervised model called Self-Organized Multi-Modal Longitudinal Maps (SOM2LM). SOM2LM encodes each modality as a 2D self-organizing map (SOM) so that one dimension of each modality-specific SOMs corresponds to disease abnormality. The model also regularizes across modalities to depict their temporal order of capturing abnormality. When applied to longitudinal T1w MRIs and amyloid PET of the Alzheimer\u2019s Disease Neuroimaging Initiative (ADNI, N=741), SOM2LM generates interpretable latent spaces that characterize disease abnormality. When compared to state-of-art models, it achieves higher accuracy for the downstream tasks of cross-modality prediction of amyloid status from T1w-MRI and joint-modality prediction of individuals with mild cognitive impairment converting to AD using both MRI and amyloid PET. The code is available at https:\/\/github.com\/ouyangjiahong\/longitudinal-som-multi-modality.", "title":"SOM2LM: Self-Organized Multi-Modal Longitudinal Maps", "authors":[ "Ouyang, Jiahong", "Zhao, Qingyu", "Adeli, Ehsan", "Zaharchuk, Greg", "Pohl, Kilian M." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ouyangjiahong\/longitudinal-som-multi-modality" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":154 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1606_paper.pdf", "bibtext":"@InProceedings{ Xie_An_MICCAI2024,\n author = { Xie, Shiyu and Zhang, Kai and Entezari, Alireza },\n title = { { An Evaluation of State-of-the-Art Projectors in the Presence of Noise and Nonlinearity in the Beer-Lambert Law } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Efficient computation of forward and back projection is key to scalability of iterative methods for low dose CT imaging at resolutions needed in clinical applications. State-of-the-art projectors provide computationally-efficient approximations to X-ray optics calculations in the forward model that strike a balance between speed and accuracy. While computational performance of these projectors are well studied, their accuracy is often analyzed in idealistic settings. When choosing a projector a key question is whether differences between projectors can impact image reconstruction in realistic settings where nonlinearity of the Beer-Lambert law and measurement noise may mask those differences. We present an approach for comparing the accuracy of projectors in practical settings where the effects of the Beer-Lambert law and measurement noise are captured by a sensitivity analysis of the forward model. Our experiments provide a comparative analysis of state-of-the-art projectors based on the impact of their approximations to the forward model on the reconstruction error. 
Our experiments suggest that the differences between projectors, measured by reconstruction errors, persist with noise in low-dose measurements and become significant in few-view imaging configurations.", "title":"An Evaluation of State-of-the-Art Projectors in the Presence of Noise and Nonlinearity in the Beer-Lambert Law", "authors":[ "Xie, Shiyu", "Zhang, Kai", "Entezari, Alireza" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ShiyuXie0116\/Evaluation-of-Projectors-Noise-Nonlinearity" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":155 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1779_paper.pdf", "bibtext":"@InProceedings{ Wan_BrainSCK_MICCAI2024,\n author = { Wang, Lilong and Liu, Mianxin and Zhang, Shaoting and Wang, Xiaosong },\n title = { { BrainSCK: Brain Structure and Cognition Alignment via Knowledge Injection and Reactivation for Diagnosing Brain Disorders } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Emerging evidence from advanced neuroimaging studies suggests common neurological bases across different brain disorders (BD) throughout the human lifespan. Researchers thus aim to create a general neuroimaging-based diagnosis model for population-scale screening for multiple BDs. Existing models predominantly use the transfer learning paradigm for BD tasks based on either out-of-domain models pre-trained with large-scale but less-related data and tasks or in-domain models pre-trained on healthy population brain data with auxiliary tasks such as age prediction. The former approach has little recognition of inter-individual variations and BD-related features in the population-scale brain data, while the latter relies on a weak implicit association between the proxy and BD tasks. In this work, we propose a two-stage vision-language model adaptation strategy to incorporate novel knowledge into the well pre-trained out-of-domain model (e.g., BLIP) by aligning basic cognition and brain structural features for accurate diagnosis of multiple BDs. First, using life-span Human Connectome Project data, we textualize the demographics and psychometrics records and construct knowledge-injecting textual prompts (with important cognitive science contexts). The model is expected to learn the alignment between brain structure from images and cognitive knowledge from texts. Then, we customize knowledge-reactivating instructions and further tune the model to accommodate the cognitive symptoms in each BD diagnosis task. Experimental results show that our framework outperforms other state-of-the-art methods on three BD diagnosis tasks of different age groups.
It demonstrates a promising and feasible learning paradigm for adapting large foundation models to the cognitive neuroscience and neurology fields.", "title":"BrainSCK: Brain Structure and Cognition Alignment via Knowledge Injection and Reactivation for Diagnosing Brain Disorders", "authors":[ "Wang, Lilong", "Liu, Mianxin", "Zhang, Shaoting", "Wang, Xiaosong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/openmedlab\/BrainSCK" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":156 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0595_paper.pdf", "bibtext":"@InProceedings{ Thi_Conditional_MICCAI2024,\n author = { Thibeault, Sylvain and Romaguera, Liset Vazquez and Kadoury, Samuel },\n title = { { Conditional 4D Motion Diffusion Models with Masked Observations to Forecast Deformations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Image-guided radiotherapy procedures in the abdominal region require accurate real-time motion management for safe dose delivery.\nAnticipating future 4D motion using live in-plane imaging is crucial for accurate tumor tracking, which enables sparing normal tissue and reducing recurrence probabilities. However, current real-time tracking methods often require a specific template and volumetric inputs, which is not feasible for online treatments. Generative models remain hindered by several issues, including complex loss functions and training processes. This paper presents a conditional motion diffusion model treating high-dimensional data, describing complex anatomical deformations. A discrete wavelet transform (DWT) maps inputs into a frequency domain, allowing the selection of top features for the denoising process. The end-to-end model includes a masking mechanism of deformation observations, where during training, a motion diffusion model is learned to produce deformations from random noise. For future sequences, a denoising process conditioned on input deformations and time-wise prior distributions is applied to generate smooth and continuous deformation outputs from cine 2D images. Lastly, a temporal 3D local tracking module exploiting latent representations is used to refine the local motion vectors around pre-defined tracked regions.
The proposed forecasting technique reduces errors by 62% compared with a 4D conditional Transformer displacement model, with target errors of 1.29+\/-0.95 mm, and mean geometrical errors of 1.05+\/-0.53 mm on forecasted abdominal MRI.", "title":"Conditional 4D Motion Diffusion Models with Masked Observations to Forecast Deformations", "authors":[ "Thibeault, Sylvain", "Romaguera, Liset Vazquez", "Kadoury, Samuel" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":157 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1173_paper.pdf", "bibtext":"@InProceedings{ Xie_DiffDGSS_MICCAI2024,\n author = { Xie, Yingpeng and Qu, Junlong and Xie, Hai and Wang, Tianfu and Lei, Baiying },\n title = { { DiffDGSS: Generalizable Retinal Image Segmentation with Deterministic Representation from Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Acquiring a comprehensive segmentation map of the retinal image serves as the preliminary step in developing an interpretable diagnostic tool for retinopathy. However, the inherent complexity of retinal anatomical structures and lesions, along with data heterogeneity and annotation scarcity, poses challenges to the development of accurate and generalizable models. Denoising diffusion probabilistic models (DDPM) have recently shown promise in various medical image applications. In this paper, driven by the motivation to leverage strong pre-trained DDPM, we introduce a novel framework, named DiffDGSS, to exploit the latent representations from the diffusion models for Domain Generalizable Semantic Segmentation (DGSS). In particular, we demonstrate that the deterministic inversion of diffusion models yields robust representations that allow for strong out-of-domain generalization. Subsequently, we develop an adaptive semantic feature interpreter for projecting these representations into an accurate segmentation map. 
Extensive experiments across various tasks (retinal lesion and vessel segmentation) and settings (cross-domain and cross-modality) demonstrate the superiority of our DiffDGSS over state-of-the-art methods.", "title":"DiffDGSS: Generalizable Retinal Image Segmentation with Deterministic Representation from Diffusion Models", "authors":[ "Xie, Yingpeng", "Qu, Junlong", "Xie, Hai", "Wang, Tianfu", "Lei, Baiying" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":158 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1419_paper.pdf", "bibtext":"@InProceedings{ Gam_Automatic_MICCAI2024,\n author = { Gamal, Mahmoud and Baraka, Marwa and Torki, Marwan },\n title = { { Automatic Mandibular Semantic Segmentation of Teeth Pulp Cavity and Root Canals, and Inferior Alveolar Nerve on Pulpy3D Dataset } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate segmentation of the pulp cavity, root canals, and inferior alveolar nerve (IAN) in dental imaging is essential for effective orthodontic interventions. Despite the availability of numerous Cone Beam Computed Tomography (CBCT) scans annotated for individual dental-anatomical structures, there is a lack of a comprehensive dataset covering all necessary parts. As a result, existing deep learning models have encountered challenges due to the scarcity of comprehensive datasets encompassing all relevant anatomical structures. We present our novel Pulpy3D dataset, specifically curated to address dental-anatomical structures\u2019 segmentation and identification needs. Additionally, we noticed that many current deep learning methods in dental imaging prefer 2D segmentation, missing out on the benefits of 3D segmentation. Our study suggests a UNet-based approach capable of segmenting dental structures using 3D volume segmentation, providing a better understanding of spatial relationships and more precise dental anatomy representation. Pulpy3D contributed in creating the seeding model from 150 scans, which helped complete the remainder of the dataset. Other modifications in the architecture, such as using separate networks, one semantic network, and a multi-task network, were highlighted in the model description to show how versatile the Pulpy3D dataset is and how different models, architectures, and tasks can run on the dataset. Additionally, we stress the lack of attention to pulp segmentation tasks in existing studies, underlining the need for specialized methods in this area. 
The code and Pulpy3D links can be found at https:\/\/github.com\/mahmoudgamal0\/Pulpy3D", "title":"Automatic Mandibular Semantic Segmentation of Teeth Pulp Cavity and Root Canals, and Inferior Alveolar Nerve on Pulpy3D Dataset", "authors":[ "Gamal, Mahmoud", "Baraka, Marwa", "Torki, Marwan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mahmoudgamal0\/Pulpy3D" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":159 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1614_paper.pdf", "bibtext":"@InProceedings{ Tan_Follow_MICCAI2024,\n author = { Tang, Xin and Cao, Zhi and Zhang, Weijing and Zhao, Di and Liao, Hongen and Zhang, Daoqiang and Chen, Fang },\n title = { { Follow Sonographers\u2019 Visual Scan-path: Adjusting CNN Model for Diagnosing Gout from Musculoskeletal Ultrasound } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"The current models for automatic gout diagnosis train a convolutional neural network (CNN) using musculoskeletal ultrasound (MSKUS) images paired with classification labels, which are annotated by skilled sonographers. However, this prevalent diagnostic model overlooks valuable supplementary information derived from sonographers\u2019 annotations, such as the visual scan-path followed by sonographers. We notice that\nthis annotation procedure offers valuable insight into human attention, aiding the CNN model in focusing on crucial features in gouty MSKUS scans, including the double contour sign, tophus, and snowstorm, which play a crucial role in sonographers\u2019 diagnostic decisions. To verify this, we create a gout MSKUS dataset that is enriched with sonographers\u2019 annotation byproduct visual scan-paths. Furthermore, we introduce a scan-path based fine-tuning training mechanism (SFT) for gout diagnosis models, leveraging the annotation byproduct scan-paths for enhanced learning. 
The experimental results demonstrate the superiority of our SFT method over several SOTA CNNs.", "title":"Follow Sonographers\u2019 Visual Scan-path: Adjusting CNN Model for Diagnosing Gout from Musculoskeletal Ultrasound", "authors":[ "Tang, Xin", "Cao, Zhi", "Zhang, Weijing", "Zhao, Di", "Liao, Hongen", "Zhang, Daoqiang", "Chen, Fang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":160 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1193_paper.pdf", "bibtext":"@InProceedings{ Zhu_DiffuseReg_MICCAI2024,\n author = { Zhuo, Yongtai and Shen, Yiqing },\n title = { { DiffuseReg: Denoising Diffusion Model for Obtaining Deformation Fields in Unsupervised Deformable Image Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deformable image registration aims to precisely align medical images from different modalities or times. Traditional deep learning methods, while effective, often lack interpretability, real-time observability and adjustment capacity during registration inference. Denoising diffusion models present an alternative by reformulating registration as iterative image denoising. However, existing diffusion registration approaches do not fully harness capabilities, neglecting the critical sampling phase that enables continuous observability during the inference. Hence, we introduce DiffuseReg, an innovative diffusion-based method that denoises deformation fields instead of images for improved transparency. We also propose a novel denoising network upon Swin Transformer, which better integrates moving and fixed images with diffusion time step throughout the denoising process. Furthermore, we enhance control over the denoising registration process with a novel similarity consistency regularization. Experiments on ACDC datasets demonstrate DiffuseReg outperforms existing diffusion registration methods by 1.32% in Dice score. The sampling process in DiffuseReg enables real-time output observability and adjustment unmatched by previous deep models. The code is available at https:\/\/github.com\/KUJOYUTA\/DiffuseReg", "title":"DiffuseReg: Denoising Diffusion Model for Obtaining Deformation Fields in Unsupervised Deformable Image Registration", "authors":[ "Zhuo, Yongtai", "Shen, Yiqing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/KUJOYUTA\/DiffuseReg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":161 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2182_paper.pdf", "bibtext":"@InProceedings{ Kum_Continual_MICCAI2024,\n author = { Kumari, Pratibha and Reisenb\u00fcchler, Daniel and Luttner, Lucas and Schaadt, Nadine S. 
and Feuerhake, Friedrich and Merhof, Dorit },\n title = { { Continual Domain Incremental Learning for Privacy-aware Digital Pathology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"In recent years, there has been remarkable progress in the field of digital pathology, driven by the ability to model complex tissue patterns using advanced deep-learning algorithms. However, the robustness of these models is often severely compromised in the presence of data shifts (e.g., different stains, organs, centers, etc.). Alternatively, continual learning (CL) techniques aim to reduce the forgetting of past data when learning new data with distributional shift conditions. Specifically, rehearsal-based CL techniques, which store some past data in a buffer and then replay it with new data, have proven effective in medical image analysis tasks. However, privacy concerns arise as these approaches store past data, prompting the development of our novel Generative Latent Replay-based CL (GLRCL) approach. GLRCL captures the previous distribution through Gaussian Mixture Models instead of storing past samples, which are then utilized to generate features and perform latent replay with new data. We systematically evaluate our proposed framework under different shift conditions in histopathology data, including stain and organ shift. Our approach significantly outperforms popular buffer-free CL approaches and performs similarly to rehearsal-based CL approaches that require large buffers causing serious privacy violations.", "title":"Continual Domain Incremental Learning for Privacy-aware Digital Pathology", "authors":[ "Kumari, Pratibha", "Reisenb\u00fcchler, Daniel", "Luttner, Lucas", "Schaadt, Nadine S.", "Feuerhake, Friedrich", "Merhof, Dorit" ], "id":"Conference", "arxiv_id":"2409.06455", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":162 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0423_paper.pdf", "bibtext":"@InProceedings{ Lin_Shortcut_MICCAI2024,\n author = { Lin, Manxi and Weng, Nina and Mikolaj, Kamil and Bashir, Zahra and Svendsen, Morten B. S. and Tolsgaard, Martin G. and Christensen, Anders Nymark and Feragen, Aasa },\n title = { { Shortcut Learning in Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Shortcut learning is a phenomenon where machine learning models prioritize learning simple, potentially misleading cues from data that do not generalize well beyond the training set. While existing research primarily investigates this in the realm of image classification, this study extends the exploration of shortcut learning into medical image segmentation. We demonstrate that clinical annotations such as calipers, and the combination of zero-padded convolutions and center-cropped training sets in the dataset can inadvertently serve as shortcuts, impacting segmentation accuracy. 
We identify and evaluate shortcut learning on two different but common medical image segmentation tasks. In addition, we suggest strategies to mitigate the influence of shortcut learning and improve the generalizability of the segmentation models. By uncovering the presence and implications of shortcuts in medical image segmentation, we provide insights and methodologies for evaluating and overcoming this pervasive challenge and call for attention in the community to shortcuts in segmentation. Our code is public at https:\/\/github.com\/nina-weng\/shortcut_skinseg .", "title":"Shortcut Learning in Medical Image Segmentation", "authors":[ "Lin, Manxi", "Weng, Nina", "Mikolaj, Kamil", "Bashir, Zahra", "Svendsen, Morten B. S.", "Tolsgaard, Martin G.", "Christensen, Anders Nymark", "Feragen, Aasa" ], "id":"Conference", "arxiv_id":"2403.06748", "GitHub":[ "https:\/\/github.com\/nina-weng\/shortcut_skinseg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":163 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1181_paper.pdf", "bibtext":"@InProceedings{ Hao_EMFformer_MICCAI2024,\n author = { Hao, Zhaoquan and Quan, Hongyan and Lu, Yinbin },\n title = { { EMF-former: An Efficient and Memory-Friendly Transformer for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image segmentation is of significant importance for computer-aided diagnosis. In this task, methods based on Convolutional Neural Networks (CNNs) have shown good performance in extracting local features. However, they cannot capture global dependencies, which are crucial for medical images. On the other hand, Transformer-based methods can establish global dependencies through self-attention, providing a supplement to local convolution. However, the expensive matrix multiplication in the self-attention of a vanilla transformer and the memory usage are still bottlenecks. In this work, we propose a segmentation model named EMF-former. By combining DWConv, channel shuffle and PWConv, we design a Depthwise Separable Shuffled Convolution Module (DSPConv) to reduce the parameter count of convolutions. Additionally, we employ an efficient Vector Aggregation Attention (VAA) that substitutes key-value interactions with element-wise multiplication after broadcasting two vectors to reduce computational complexity. Moreover, we substitute the parallel multi-head attention module with the Serial Multi-Head Attention Module (S-MHA) to reduce feature redundancy and memory usage in multi-head attention. Combining the above modules, EMF-former can perform medical image segmentation efficiently with fewer parameters, lower computational complexity and lower memory usage while preserving segmentation accuracy. 
We conduct experimental evaluations on the ACDC and Hippocampus datasets, achieving mIoU values of 80.5% and 78.8%, respectively.", "title":"EMF-former: An Efficient and Memory-Friendly Transformer for Medical Image Segmentation", "authors":[ "Hao, Zhaoquan", "Quan, Hongyan", "Lu, Yinbin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":164 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2202_paper.pdf", "bibtext":"@InProceedings{ Mil_AutoSkull_MICCAI2024,\n author = { Milojevic, Aleksandar and Peter, Daniel and Huber, Niko B. and Azevedo, Luis and Latyshev, Andrei and Sailer, Irena and Gross, Markus and Thomaszewski, Bernhard and Solenthaler, Barbara and G\u00f6zc\u00fc, Baran },\n title = { { AutoSkull: Learning-based Skull Estimation for Automated Pipelines } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"In medical imaging, accurately representing facial features is crucial for applications such as radiation-free medical visualizations and treatment simulations. We aim to predict skull shapes from 3D facial scans with high accuracy, prioritizing simplicity for seamless integration into automated pipelines. Our method trains an MLP network on PCA coefficients using data from registered skin- and skull-mesh pairs obtained from CBCT scans, which is then used to infer the skull shape for a given skin surface. By incorporating teeth positions as additional prior information extracted from intraoral scans, we further improve the accuracy of the model, outperforming previous work. 
We showcase a clinical application of our work, where the inferred skull information is used in an FEM model to compute the outcome of an orthodontic treatment.", "title":"AutoSkull: Learning-based Skull Estimation for Automated Pipelines", "authors":[ "Milojevic, Aleksandar", "Peter, Daniel", "Huber, Niko B.", "Azevedo, Luis", "Latyshev, Andrei", "Sailer, Irena", "Gross, Markus", "Thomaszewski, Bernhard", "Solenthaler, Barbara", "G\u00f6zc\u00fc, Baran" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":165 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2262_paper.pdf", "bibtext":"@InProceedings{ Zha_Biophysics_MICCAI2024,\n author = { Zhang, Lipei and Cheng, Yanqi and Liu, Lihao and Sch\u00f6nlieb, Carola-Bibiane and Aviles-Rivero, Angelica I },\n title = { { Biophysics Informed Pathological Regularisation for Brain Tumour Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advancements in deep learning have significantly improved brain tumour segmentation techniques; however, the results still lack confidence and robustness as they solely consider image data without biophysical priors or pathological information. Integrating biophysics-informed regularisation is one effective way to change this situation, as it provides a prior regularisation for automated end-to-end learning. In this paper, we propose a novel approach that designs brain tumour growth Partial Differential Equation (PDE) models as a regularisation with deep learning, operational with any network model. Our method introduces tumour growth PDE models directly into the segmentation process, improving accuracy and robustness, especially in data-scarce scenarios. This system estimates tumour cell density using a periodic activation function. By effectively integrating this estimation with biophysical models, we achieve a better capture of tumour characteristics. This approach not only aligns the segmentation closer to actual biological behaviour but also strengthens the model\u2019s performance under limited data conditions. 
We demonstrate the effectiveness of our framework through extensive experiments on the BraTS 2023 dataset, showcasing significant improvements in both precision and reliability of tumour segmentation.", "title":"Biophysics Informed Pathological Regularisation for Brain Tumour Segmentation", "authors":[ "Zhang, Lipei", "Cheng, Yanqi", "Liu, Lihao", "Sch\u00f6nlieb, Carola-Bibiane", "Aviles-Rivero, Angelica I" ], "id":"Conference", "arxiv_id":"2403.09136", "GitHub":[ "https:\/\/github.com\/uceclz0\/biophy_brats" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":166 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1108_paper.pdf", "bibtext":"@InProceedings{ Hu_AScanning_MICCAI2024,\n author = { Hu, Yichen and Wang, Chao and Song, Weitao and Tiulpin, Aleksei and Liu, Qing },\n title = { { A Scanning Laser Ophthalmoscopy Image Database and Trustworthy Retinal Disease Detection Method } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Scanning laser ophthalmoscopy (SLO) images provide ophthalmologists with a non-invasive way to examine the retina for diagnostic and treatment purposes. Manual reading SLO images by ophthalmologists is a tedious task. Thus, developing trustworthy disease detection algorithms becomes urgent. However, up to now, there are no large-scale SLO image databases. In this paper, we collect and release a new SLO image dataset, named Retina-SLO, containing 7943 images of 4102 eyes from 2440 subjects with labels of three diseases, i.e., macular edema (ME), diabetic retinopathy (DR), and glaucoma. To our knowledge, Retina-SLO is the largest publicly available SLO image dataset for multiple retinal disease detection. While numerous deep learning-based methods for disease detection with medical images have been proposed, they ignore the model trust. Particularly, from a user\u2019s perspective, the detection model is highly untrustworthy if it makes inconsistent predictions on different SLO images of the same eye captured within relatively short time intervals. To solve this issue, we propose TrustDetector, a novel disease detection method, leveraging eye-wise consistency learning and rank-based contrastive learning to ensure consistent predictions and ordered representations aligned with disease severity levels on SLO images. Experimental results show that our TrustDetector achieves better detection performances and higher consistency than the state-of-the-arts. 
Dataset and code are available at https:\/\/drive.google.com\/drive\/TrustDetector\/Retina-SLO.", "title":"A Scanning Laser Ophthalmoscopy Image Database and Trustworthy Retinal Disease Detection Method", "authors":[ "Hu, Yichen", "Wang, Chao", "Song, Weitao", "Tiulpin, Aleksei", "Liu, Qing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":167 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1926_paper.pdf", "bibtext":"@InProceedings{ Wan_LSSNet_MICCAI2024,\n author = { Wang, Wei and Sun, Huiying and Wang, Xin },\n title = { { LSSNet: A Method for Colon Polyp Segmentation Based on Local Feature Supplementation and Shallow Feature Supplementation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate polyp segmentation methods are essential for colon polyp screening and colorectal cancer diagnosis. However, polyp segmentation faces the following challenges: (1) Small-sized polyps are easily lost during the identification process. (2) The boundaries separating the polyp from its surroundings are fuzzy. (3) Additional distracting information is introduced during the colonoscopy procedure, resulting in noise in the colonoscopy image and influencing the segmentation outcomes. To cope with these three challenges, a method for colon polyp segmentation based on local feature supplementation and shallow feature supplementation (LSSNet) is proposed by incorporating feature supplementation structures in the encoder-decoder structure. The multiscale feature extraction (MFE) module is designed to extract local features, the interlayer attention fusion (IAF) module is designed to fuse supplementary features with the current layer features, and the semantic gap reduction (SGR) module is designed to reduce the semantic gaps between the layers, which together form the local feature supplementation structure. The shallow feature supplementation (SFS) module is designed to supplement the features in the fuzzy areas. Based on these four modules LSSNet is proposed. LSSNet is evaluated on five datasets: ClinicDB, KvasirSEG, ETIS, ColonDB, and EndoScene. The results show that mDice scores are improved by 1.33%, 0.74%, 2.65%, 1.08%, and 0.62% respectively over the compared state-of-the-art methods. 
The codes are available at https:\/\/github.com\/heyeying\/LSSNet.", "title":"LSSNet: A Method for Colon Polyp Segmentation Based on Local Feature Supplementation and Shallow Feature Supplementation", "authors":[ "Wang, Wei", "Sun, Huiying", "Wang, Xin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/heyeying\/LSSNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":168 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0366_paper.pdf", "bibtext":"@InProceedings{ Liu_FedFMS_MICCAI2024,\n author = { Liu, Yuxi and Luo, Guibo and Zhu, Yuesheng },\n title = { { FedFMS: Exploring Federated Foundation Models for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image segmentation is crucial for clinical diagnosis. The Segmentation Anything Model (SAM) serves as a powerful foundation model for visual segmentation and can be adapted for medical image segmentation. However, medical imaging data typically contain privacy-sensitive information, making it challenging to train foundation models with centralized storage and sharing. To date, there are few foundation models tailored for medical image deployment within the federated learning framework, and the segmentation performance, as well as the efficiency of communication and training, remain unexplored. In response to these issues, we developed Federated Foundation models for Medical image Segmentation (FedFMS), which includes the Federated SAM (FedSAM) and a communication and training-efficient Federated SAM with Medical SAM Adapter (FedMSA). Comprehensive experiments on diverse datasets are conducted to investigate the performance disparities between centralized training and federated learning across various configurations of FedFMS. The experiments revealed that FedFMS could achieve performance comparable to models trained via centralized training methods while maintaining privacy. Furthermore, FedMSA demonstrated the potential to enhance communication and training efficiency. Our model implementation codes are available at https:\/\/github.com\/LIU-YUXI\/FedFMS.", "title":"FedFMS: Exploring Federated Foundation Models for Medical Image Segmentation", "authors":[ "Liu, Yuxi", "Luo, Guibo", "Zhu, Yuesheng" ], "id":"Conference", "arxiv_id":"2403.05408", "GitHub":[ "https:\/\/github.com\/LMIAPC\/FednnU-Net" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":169 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1131_paper.pdf", "bibtext":"@InProceedings{ Esh_ESPA_MICCAI2024,\n author = { Eshaghzadeh Torbati, Mahbaneh and Minhas, Davneet S. and Tafti, Ahmad P. and DeCarli, Charles S. and Tudorascu, Dana L. 
and Hwang, Seong Jae },\n title = { { ESPA: An Unsupervised Harmonization Framework via Enhanced Structure Preserving Augmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The rising interest in pooling neuroimaging data from various sources presents challenges regarding scanner variability, known as scanner effects. While numerous harmonization methods aim to tackle these effects, they face issues with model robustness, brain structural modifications, and over-correction. To combat these issues, we propose a novel harmonization approach centered on simulating scanner effects through augmentation methods. This strategy enhances model robustness by providing extensive simulated matched data, comprising sets of images with similar brains but varying scanner effects. Our proposed method, ESPA, is an unsupervised harmonization framework via Enhanced Structure Preserving Augmentation. Additionally, we introduce two domain-adaptation augmentations: tissue-type contrast augmentation and GAN-based residual augmentation, both focusing on appearance-based changes to address structural modifications. While the former adapts images to the tissue-type contrast distribution of a target scanner, the latter generates residuals added to the original image for more complex scanner adaptation. These augmentations assist ESPA in mitigating over-correction through data stratification or population matching strategies during augmentation configuration. Notably, we leverage our unique in-house matched dataset as a benchmark to compare ESPA against supervised and unsupervised state-of-the-art (SOTA) harmonization methods. Our study marks the first attempt, to the best of our knowledge, to address harmonization by simulating scanner effects. Our results demonstrate the successful simulation of scanner effects, with ESPA outperforming SOTA methods using this harmonization approach.", "title":"ESPA: An Unsupervised Harmonization Framework via Enhanced Structure Preserving Augmentation", "authors":[ "Eshaghzadeh Torbati, Mahbaneh", "Minhas, Davneet S.", "Tafti, Ahmad P.", "DeCarli, Charles S.", "Tudorascu, Dana L.", "Hwang, Seong Jae" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Mahbaneh\/ESPA.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":170 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3622_paper.pdf", "bibtext":"@InProceedings{ Hej_Conditional_MICCAI2024,\n author = { Hejrati, Behzad and Banerjee, Soumyanil and Glide-Hurst, Carri and Dong, Ming },\n title = { { Conditional diffusion model with spatial attention and latent embedding for medical image segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion models have been used extensively for high quality image and video generation tasks. In this paper, we propose a novel conditional diffusion model with spatial attention and latent embedding (cDAL) for medical image segmentation. 
In cDAL, a convolutional neural network (CNN) based discriminator is used at every time-step of the diffusion process to distinguish between the generated labels and the real ones. A spatial attention map is computed based on the features learned by the discriminator to help cDAL generate more accurate segmentation of discriminative regions in an input image. Additionally, we incorporated a random latent embedding into each layer of our model to significantly reduce the number of training and sampling time-steps, thereby making it much faster than other diffusion models for image segmentation. We applied cDAL on 3 publicly available medical image segmentation datasets (MoNuSeg, Chest X-ray and Hippocampus) and observed significant qualitative and quantitative improvements with higher Dice scores and mIoU over the state-of-the-art algorithms. The source code is publicly available at https:\/\/github.com\/Hejrati\/cDAL\/.", "title":"Conditional diffusion model with spatial attention and latent embedding for medical image segmentation", "authors":[ "Hejrati, Behzad", "Banerjee, Soumyanil", "Glide-Hurst, Carri", "Dong, Ming" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Hejrati\/cDAL\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":171 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1464_paper.pdf", "bibtext":"@InProceedings{ Jau_Anatomyguided_MICCAI2024,\n author = { Jaus, Alexander and Seibold, Constantin and Rei\u00df, Simon and Heine, Lukas and Schily, Anton and Kim, Moon and Bahnsen, Fin Hendrik and Herrmann, Ken and Stiefelhagen, Rainer and Kleesiek, Jens },\n title = { { Anatomy-guided Pathology Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Pathological structures in medical images are typically deviations from the expected anatomy of a patient. \nWhile clinicians consider this interplay between anatomy and pathology, recent deep learning algorithms specialize in recognizing either one of the two, rarely considering the patient\u2019s body from such a joint perspective.\nIn this paper, we develop a generalist segmentation model that combines anatomical and pathological information, aiming to enhance the segmentation accuracy of pathological features.\nOur Anatomy-Pathology Exchange (APEx) training utilizes a query-based segmentation transformer which decodes a joint feature space into query-representations for human anatomy and interleaves them via a mixing strategy into the pathology-decoder for anatomy-informed pathology predictions. \nIn doing so, we are able to report the best results across the board on FDG-PET-CT and Chest X-Ray pathology segmentation tasks with a margin of up to 3.3% as compared to strong baseline methods. 
Code and models are available at github.com\/alexanderjaus\/APEx.", "title":"Anatomy-guided Pathology Segmentation", "authors":[ "Jaus, Alexander", "Seibold, Constantin", "Rei\u00df, Simon", "Heine, Lukas", "Schily, Anton", "Kim, Moon", "Bahnsen, Fin Hendrik", "Herrmann, Ken", "Stiefelhagen, Rainer", "Kleesiek, Jens" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/alexanderjaus\/APEx" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":172 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1002_paper.pdf", "bibtext":"@InProceedings{ Xia_Mitigating_MICCAI2024,\n author = { Xia, Tian and Roschewitz, Me\u0301lanie and De Sousa Ribeiro, Fabio and Jones, Charles and Glocker, Ben },\n title = { { Mitigating attribute amplification in counterfactual image generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Causal generative modelling is gaining interest in medical imaging due to its ability to answer interventional and counterfactual queries. Most work focuses on generating counterfactual images that look plausible, using auxiliary classifiers to enforce effectiveness of simulated interventions. We investigate pitfalls in this approach, discovering the issue of attribute amplification, where unrelated attributes are spuriously affected during interventions, leading to biases across protected characteristics and disease status. We show that attribute amplification is caused by the use of hard labels in the counterfactual training process and propose soft counterfactual fine-tuning to mitigate this issue. Our method substantially reduces the amplification effect while maintaining effectiveness of generated images, demonstrated on a large chest X-ray dataset. Our work makes an important advancement towards more faithful and unbiased causal modelling in medical imaging. 
Code available at https:\/\/github.com\/biomedia-mira\/attribute-amplification.", "title":"Mitigating attribute amplification in counterfactual image generation", "authors":[ "Xia, Tian", "Roschewitz, Me\u0301lanie", "De Sousa Ribeiro, Fabio", "Jones, Charles", "Glocker, Ben" ], "id":"Conference", "arxiv_id":"2403.09422", "GitHub":[ "https:\/\/github.com\/biomedia-mira\/attribute-amplification" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":173 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2587_paper.pdf", "bibtext":"@InProceedings{ Qia_Towards_MICCAI2024,\n author = { Qian, Kui and Qiao, Litao and Friedman, Beth and O\u2019Donnell, Edward and Kleinfeld, David and Freund, Yoav },\n title = { { Towards Explainable Automated Neuroanatomy } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present a novel method for quantifying the microscopic structure of brain tissue. \nIt is based on the automated recognition of interpretable features obtained by analyzing the shapes of cells. This contrasts with prevailing methods of brain anatomical analysis in two ways. \nFirst, contemporary methods use gray-scale values derived from a smoothed version of the anatomical images, which dissipates valuable information from the texture of the images. \nSecond, contemporary analysis uses the output of black-box Convolutional Neural Networks, while our system makes decisions based on interpretable features obtained by analyzing the shapes of individual cells. \nAn important benefit of this open-box approach is that the anatomist can understand and correct the decisions made by the computer.\nOur proposed system can accurately localize and identify existing brain structures. \nThis can be used to align and co-register brains and will facilitate connectomic studies for reverse engineering of brain circuitry.", "title":"Towards Explainable Automated Neuroanatomy", "authors":[ "Qian, Kui", "Qiao, Litao", "Friedman, Beth", "O\u2019Donnell, Edward", "Kleinfeld, David", "Freund, Yoav" ], "id":"Conference", "arxiv_id":"2404.05814", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":174 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1569_paper.pdf", "bibtext":"@InProceedings{ Liu_CUTS_MICCAI2024,\n author = { Liu, Chen and Amodio, Matthew and Shen, Liangbo L. and Gao, Feng and Avesta, Arman and Aneja, Sanjay and Wang, Jay C. and Del Priore, Lucian V. 
and Krishnaswamy, Smita },\n title = { { CUTS: A Deep Learning and Topological Framework for Multigranular Unsupervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Segmenting medical images is critical to facilitating both patient diagnoses and quantitative research. A major limiting factor is the lack of labeled data, as obtaining expert annotations for each new set of imaging data and task can be labor intensive and inconsistent among annotators. We present CUTS, an unsupervised deep learning framework for medical image segmentation. CUTS operates in two stages. For each image, it produces an embedding map via intra-image contrastive learning and local patch reconstruction. Then, these embeddings are partitioned at dynamic granularity levels that correspond to the data topology. CUTS yields a series of coarse-to-fine-grained segmentations that highlight features at various granularities. We applied CUTS to retinal fundus images and two types of brain MRI images to delineate structures and patterns at different scales. When evaluated against predefined anatomical masks, CUTS improved the dice coefficient and Hausdorff distance by at least 10% compared to existing unsupervised methods. Finally, CUTS showed performance on par with Segment Anything Models (SAM, MedSAM, SAM-Med2D) pre-trained on gigantic labeled datasets.", "title":"CUTS: A Deep Learning and Topological Framework for Multigranular Unsupervised Medical Image Segmentation", "authors":[ "Liu, Chen", "Amodio, Matthew", "Shen, Liangbo L.", "Gao, Feng", "Avesta, Arman", "Aneja, Sanjay", "Wang, Jay C.", "Del Priore, Lucian V.", "Krishnaswamy, Smita" ], "id":"Conference", "arxiv_id":"2209.11359", "GitHub":[ "https:\/\/github.com\/KrishnaswamyLab\/CUTS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":175 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3636_paper.pdf", "bibtext":"@InProceedings{ Aka_CheXtriev_MICCAI2024,\n author = { Akash R. J., Naren and Tadanki, Arihanth and Sivaswamy, Jayanthi },\n title = { { CheXtriev: Anatomy-Centered Representation for Case-Based Retrieval of Chest Radiographs } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present CheXtriev, a graph-based, anatomy-aware framework for chest radiograph retrieval. Unlike prior methods focussed on global features, our method leverages graph transformers to extract informative features from specific anatomical regions. Furthermore, it captures spatial context and the interplay between anatomical location and findings. This contextualization, grounded in evidence-based anatomy, results in a richer anatomy-aware representation and leads to more accurate, effective and efficient retrieval, particularly for less prevalent findings. 
CheXtriev outperforms state-of-the-art global and local approaches by 18% to 26% in retrieval accuracy and 11% to 23% in ranking quality.", "title":"CheXtriev: Anatomy-Centered Representation for Case-Based Retrieval of Chest Radiographs", "authors":[ "Akash R. J., Naren", "Tadanki, Arihanth", "Sivaswamy, Jayanthi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/cvit-mip\/chextriev" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":176 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4152_paper.pdf", "bibtext":"@InProceedings{ Ju_Universal_MICCAI2024,\n author = { Ju, Lie and Wu, Yicheng and Feng, Wei and Yu, Zhen and Wang, Lin and Zhu, Zhuoting and Ge, Zongyuan },\n title = { { Universal Semi-Supervised Learning for Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised learning (SSL) has attracted much attention since it reduces the expensive costs of collecting adequate well-labeled training data, especially for deep learning methods. However, traditional SSL is built upon an assumption that labeled and unlabeled data should be from the same distribution, e.g., classes and domains. In practical scenarios, however, unlabeled data would be from unseen classes or unseen domains, and it is still challenging to exploit them by existing SSL methods. Therefore, in this paper, we propose a unified framework to leverage these unseen unlabeled data for open-scenario semi-supervised medical image classification. We first design a novel scoring mechanism, called dual-path outliers estimation, to identify samples from unseen classes. Meanwhile, to extract unseen-domain samples, we then apply an effective variational autoencoder (VAE) pre-training. \nAfter that, we conduct domain adaptation to fully exploit the value of the detected unseen-domain samples to boost semi-supervised training. We evaluated our proposed framework on dermatology and ophthalmology tasks. 
Extensive experiments demonstrate that our model can achieve superior classification performance in various medical SSL scenarios.", "title":"Universal Semi-Supervised Learning for Medical Image Classification", "authors":[ "Ju, Lie", "Wu, Yicheng", "Feng, Wei", "Yu, Zhen", "Wang, Lin", "Zhu, Zhuoting", "Ge, Zongyuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/PyJulie\/USSL4MIC" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":177 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3350_paper.pdf", "bibtext":"@InProceedings{ Hu_LGA_MICCAI2024,\n author = { Hu, Jihong and Li, Yinhao and Sun, Hao and Song, Yu and Zhang, Chujie and Lin, Lanfen and Chen, Yen-Wei },\n title = { { LGA: A Language Guide Adapter for Advancing the SAM Model\u2019s Capabilities in Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"In addressing the unique challenges of medical image segmentation, foundation models like the Segment Anything Model (SAM), originally developed for natural images, often falter due to the distinct nature of medical images. This study introduces the Language Guided Adapter (LGA), a parameter-efficient fine-tuning approach that extends SAM\u2019s utility to medical segmentation tasks. Through the integration of textual data from medical reports via a pretrained BERT model into embeddings, LGA combines these embeddings with the image features in SAM\u2019s image encoder using Feature Fusion Modules (FFM). Our method significantly enhances model performance and reduces computational overhead by freezing most parameters during the fine-tuning process. Evaluated on the CT-based MosMedData+ and the X-ray dataset QaTa-COV19, LGA demonstrates its effectiveness and adaptability, achieving competitive results with a significant reduction in the number of parameters required for fine-tuning compared to SOTA medical segmentation models. 
This enhancement underscores the potential of foundation models, leveraging the integration of multimodal knowledge as a pivotal approach for application in specialized medical tasks, thus charting a course towards more precise and adaptable diagnostic methodologies.", "title":"LGA: A Language Guide Adapter for Advancing the SAM Model\u2019s Capabilities in Medical Image Segmentation", "authors":[ "Hu, Jihong", "Li, Yinhao", "Sun, Hao", "Song, Yu", "Zhang, Chujie", "Lin, Lanfen", "Chen, Yen-Wei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/JiHooooo\/LGA\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":178 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3227_paper.pdf", "bibtext":"@InProceedings{ Pen_Advancing_MICCAI2024,\n author = { Peng, Qiong and Lin, Weiping and Hu, Yihuang and Bao, Ailisi and Lian, Chenyu and Wei, Weiwei and Yue, Meng and Liu, Jingxin and Yu, Lequan and Wang, Liansheng },\n title = { { Advancing H&E-to-IHC Virtual Staining with Task-Specific Domain Knowledge for HER2 Scoring } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"The assessment of HER2 expression is crucial in diagnosing breast cancer. Staining pathological tissues with immunohistochemistry (IHC) is a critically pivotal step in the assessment procedure, while it is expensive and time-consuming. Recently, generative models have emerged as a novel paradigm for virtual staining from hematoxylin-eosin (H&E) to IHC. Unlike traditional image translation tasks, virtual staining in IHC for HER2 scoring requires greater attention to regions like nuclei and stained membranes, informed by task-specific domain knowledge. Unfortunately, most existing virtual staining methods overlook this point. In this paper, we propose a novel generative adversarial network (GAN) based solution that incorporates specific knowledge of HER2 scoring, i.e., nuclei distribution and membrane staining intensity. We introduce a nuclei density estimator to learn the nuclei distribution and thus facilitate the cell alignment between the real and generated images by an auxiliary regularization branch. Moreover, another branch is tailored to focus on the stained membranes, ensuring a more consistent membrane staining intensity. We collect RegH2I, a dataset comprising 2592 pairs of registered H&E-IHC images and conduct extensive experiments to evaluate our approach, including H&E-to-IHC virtual staining on internal and external datasets, nuclei distribution and membrane staining intensity analysis, as well as downstream tasks for generated images. The results demonstrate that our method achieves superior performance than existing methods. 
Code and dataset are released at https:\/\/github.com\/balball\/TDKstain.", "title":"Advancing H E-to-IHC Virtual Staining with Task-Specific Domain Knowledge for HER2 Scoring", "authors":[ "Peng, Qiong", "Lin, Weiping", "Hu, Yihuang", "Bao, Ailisi", "Lian, Chenyu", "Wei, Weiwei", "Yue, Meng", "Liu, Jingxin", "Yu, Lequan", "Wang, Liansheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/balball\/TDKstain" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":179 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3238_paper.pdf", "bibtext":"@InProceedings{ Guo_Unsupervised_MICCAI2024,\n author = { Guo, Juncheng and Lin, Jianxin and Tan, Guanghua and Lu, Yuhuan and Gao, Zhan and Li, Shengli and Li, Kenli },\n title = { { Unsupervised Ultrasound Image Quality Assessment with Score Consistency and Relativity Co-learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Selecting an optimal standard plane in prenatal ultrasound is crucial for improving the accuracy of AI-assisted diagnosis. Existing approaches, typically dependent on detecting the presence of anatomical structures as defined by clinical protocols, have been constrained by a lack of consideration for image perceptual quality. Although supervised training with manually labeled quality scores seems feasible, the subjective nature and unclear definition of these scores make such learning error-prone and manual labeling excessively time-consuming. In this paper, we present an unsupervised ultrasound image quality assessment method with score consistency and relativity co-learning (CRL-UIQA). Our approach generates pseudo-labels by calculating feature distribution distances between ultrasound images and high-quality standard planes, leveraging consistency and relativity for training regression networks in quality prediction. 
Extensive experiments on the dataset demonstrate the impressive performance of the proposed CRL-UIQA, showcasing excellent generalization across diverse plane images.", "title":"Unsupervised Ultrasound Image Quality Assessment with Score Consistency and Relativity Co-learning", "authors":[ "Guo, Juncheng", "Lin, Jianxin", "Tan, Guanghua", "Lu, Yuhuan", "Gao, Zhan", "Li, Shengli", "Li, Kenli" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":180 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1448_paper.pdf", "bibtext":"@InProceedings{ Zen_Realistic_MICCAI2024,\n author = { Zeng, Tianle and Loza Galindo, Gerardo and Hu, Junlei and Valdastri, Pietro and Jones, Dominic },\n title = { { Realistic Surgical Image Dataset Generation Based On 3D Gaussian Splatting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Computer vision technologies markedly enhance the automation capabilities of robotic-assisted minimally invasive surgery (RAMIS) through advanced tool tracking, detection, and localization. However, the limited availability of comprehensive surgical datasets for training represents a significant challenge in this field. This research introduces a novel method that employs 3D Gaussian Splatting to generate synthetic surgical datasets. We propose a method for extracting and combining 3D Gaussian representations of surgical instruments and background operating environments, transforming and combining them to generate high-fidelity synthetic surgical scenarios. We developed a data recording system capable of acquiring images alongside tool and camera poses in a surgical scene. Using this pose data, we synthetically replicate the scene, thereby enabling direct comparisons of the synthetic image quality (27.796\u00b11.796 PSNR). As a further validation, we compared two YOLOv5 models trained on the synthetic and real data, respectively, and assessed their performance in an unseen real-world test dataset. 
Comparing the performances, we observe that the synthetic-trained model outperforms the real-world-trained model by 12% when both are tested on real-world data.", "title":"Realistic Surgical Image Dataset Generation Based On 3D Gaussian Splatting", "authors":[ "Zeng, Tianle", "Loza Galindo, Gerardo", "Hu, Junlei", "Valdastri, Pietro", "Jones, Dominic" ], "id":"Conference", "arxiv_id":"2407.14846", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":181 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0225_paper.pdf", "bibtext":"@InProceedings{ Cui_EndoDAC_MICCAI2024,\n author = { Cui, Beilei and Islam, Mobarakol and Bai, Long and Wang, An and Ren, Hongliang },\n title = { { EndoDAC: Efficient Adapting Foundation Model for Self-Supervised Depth Estimation from Any Endoscopic Camera } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Depth estimation plays a crucial role in various tasks within endoscopic surgery, including navigation, surface reconstruction, and augmented reality visualization. Despite the significant achievements of foundation models in vision tasks, including depth estimation, their direct application to the medical domain often results in suboptimal performance. This highlights the need for efficient adaptation methods to adapt these models to endoscopic depth estimation. We propose Endoscopic Depth Any Camera (EndoDAC), an efficient self-supervised depth estimation framework that adapts foundation models to endoscopic scenes. Specifically, we develop the Dynamic Vector-Based Low-Rank Adaptation (DV-LoRA) and employ Convolutional Neck blocks to tailor the foundational model to the surgical domain, utilizing remarkably few trainable parameters. Given that camera information is not always accessible, we also introduce a self-supervised adaptation strategy that estimates camera intrinsics using the pose encoder. Our framework is capable of being trained solely on monocular surgical videos from any camera, ensuring minimal training costs. Experiments demonstrate that our approach obtains superior performance even with fewer training epochs and without knowledge of the ground-truth camera intrinsics. 
Code is available at https:\/\/github.com\/BeileiCui\/EndoDAC.", "title":"EndoDAC: Efficient Adapting Foundation Model for Self-Supervised Depth Estimation from Any Endoscopic Camera", "authors":[ "Cui, Beilei", "Islam, Mobarakol", "Bai, Long", "Wang, An", "Ren, Hongliang" ], "id":"Conference", "arxiv_id":"2405.08672", "GitHub":[ "https:\/\/github.com\/BeileiCui\/EndoDAC" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":182 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0254_paper.pdf", "bibtext":"@InProceedings{ Sun_Continually_MICCAI2024,\n author = { Sun, Yihua and Khor, Hee Guan and Wang, Yuanzheng and Wang, Zhuhao and Zhao, Hongliang and Zhang, Yu and Ma, Longfei and Zheng, Zhuozhao and Liao, Hongen },\n title = { { Continually Tuning a Large Language Model for Multi-domain Radiology Report Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Large language models (LLMs) have demonstrated potential across various tasks, including vision-language applications like chest X-ray (XR) report generation (RG) in healthcare. Recent RG approaches focus on optimizing model performance for a single dataset with a single XR modality, often neglecting the critical area of computed tomography (CT) report generation. The challenge is compounded by medical datasets being isolated across different centers, making comprehensive collection difficult. Furthermore, LLMs trained on datasets sequentially can experience catastrophic forgetting. In this paper, we move beyond conventional approaches of training on a single dataset, and focus on improving the overall performance on sequentially collected multi-center datasets. We incorporate four datasets with diverse languages and image modalities for the experiments. Our approach utilizes a minimal number of task-specific learnable weights within an LLM-based RG method for each domain, maintaining the majority of weights frozen to avoid forgetting. Utilizing LLMs\u2019 multilingual generalizability, we align models and facilitate knowledge sharing through a multi-label supervised contrastive loss within the LLM hidden space. We design a 2D-3D adapter for the image encoder to transfer from XR to CT RG tasks. A CT disease graph is established for transferring knowledge from XR to CT RG tasks, using CT\u2019s most relevant XR disease class centers in a triplet loss. 
Extensive experiments validate our design.", "title":"Continually Tuning a Large Language Model for Multi-domain Radiology Report Generation", "authors":[ "Sun, Yihua", "Khor, Hee Guan", "Wang, Yuanzheng", "Wang, Zhuhao", "Zhao, Hongliang", "Zhang, Yu", "Ma, Longfei", "Zheng, Zhuozhao", "Liao, Hongen" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":183 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0793_paper.pdf", "bibtext":"@InProceedings{ Li_Semisupervised_MICCAI2024,\n author = { Li, Haoshen and Wang, Yirui and Zhu, Jie and Guo, Dazhou and Yu, Qinji and Yan, Ke and Lu, Le and Ye, Xianghua and Zhang, Li and Wang, Qifeng and Jin, Dakai },\n title = { { Semi-supervised Lymph Node Metastasis Classification with Pathology-guided Label Sharpening and Two-streamed Multi-scale Fusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diagnosis of lymph node (LN) metastasis in CT scans is an essential yet challenging task for esophageal cancer staging and treatment planning. Deep learning methods can potentially address this issue by learning from large-scale, accurately labeled data. However, even for highly experienced physicians, only a portion of LN metastases can be accurately determined in CT. Previous work conducted supervised training with a relatively small number of annotated LNs and achieved limited performance. In our work, we leverage the teacher-student semi-supervised paradigm and explore the potential of using a large amount of unlabeled LNs in performance improvement. For unlabeled LNs, pathology reports can indicate the presence of LN metastases within the lymph node station (LNS). Hence, we propose a pathology-guided label sharpening loss by combining the metastasis status of LNS from pathology reports with predictions of the teacher model. This combination assigns pseudo labels for LNs with high confidence and then the student model is updated for better performance. Besides, to improve the initial performance of the teacher model, we propose a two-stream multi-scale feature fusion deep network that effectively fuses the local and global LN characteristics to learn from labeled LNs. Extensive four-fold cross-validation is conducted on a patient cohort of 1052 esophageal cancer patients with corresponding pathology reports and 9961 LNs (3635 labeled and 6326 unlabeled). 
The results demonstrate that our proposed method markedly outperforms previous state-of-the-art methods by 2.95\\% (from 90.23\\% to 93.18\\%) in terms of the area under the receiver operating characteristic curve (AUROC) metric on this challenging task.", "title":"Semi-supervised Lymph Node Metastasis Classification with Pathology-guided Label Sharpening and Two-streamed Multi-scale Fusion", "authors":[ "Li, Haoshen", "Wang, Yirui", "Zhu, Jie", "Guo, Dazhou", "Yu, Qinji", "Yan, Ke", "Lu, Le", "Ye, Xianghua", "Zhang, Li", "Wang, Qifeng", "Jin, Dakai" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":184 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2542_paper.pdf", "bibtext":"@InProceedings{ Xia_Conditional_MICCAI2024,\n author = { Xiao, Qing and Yoon, Siyeop and Ren, Hui and Tivnan, Matthew and Sun, Lichao and Li, Quanzheng and Liu, Tianming and Zhang, Yu and Li, Xiang },\n title = { { Conditional Score-Based Diffusion Model for Cortical Thickness Trajectory Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Alzheimer\u2019s Disease (AD) is a neurodegenerative condition characterized by diverse progression rates among individuals, with changes in cortical thickness (CTh) closely linked to its progression. Accurately forecasting CTh trajectories can significantly enhance early diagnosis and intervention strategies, providing timely care. However, the longitudinal data essential for these studies often suffer from temporal sparsity and incompleteness, presenting substantial challenges in modeling the disease\u2019s progression accurately. Existing methods are limited, focusing primarily on datasets without missing entries or requiring predefined assumptions about CTh progression. To overcome these obstacles, we propose a conditional score-based diffusion model specifically designed to generate CTh trajectories with the given baseline information, such as age, sex, and initial diagnosis. Our conditional diffusion model utilizes all available data during the training phase to make predictions based solely on baseline information during inference without needing prior history about CTh progression. The prediction accuracy of the proposed CTh prediction pipeline using a conditional score-based model was compared for sub-groups consisting of cognitively normal, mild cognitive impairment, and AD subjects. The Bland-Altman analysis shows our diffusion-based prediction model has a near-zero bias with narrow 95% confidential interval compared to the ground-truth CTh in 6-36 months. In addition, our conditional diffusion model has a stochastic generative nature, therefore, we demonstrated an uncertainty analysis of patient-specific CTh prediction through multiple realizations. 
Our code is available at https:\/\/github.com\/siyeopyoon\/Diffusion-Cortical-Thickness-Trajectory.", "title":"Conditional Score-Based Diffusion Model for Cortical Thickness Trajectory Prediction", "authors":[ "Xiao, Qing", "Yoon, Siyeop", "Ren, Hui", "Tivnan, Matthew", "Sun, Lichao", "Li, Quanzheng", "Liu, Tianming", "Zhang, Yu", "Li, Xiang" ], "id":"Conference", "arxiv_id":"2403.06940", "GitHub":[ "https:\/\/github.com\/siyeopyoon\/Diffusion-Cortical-Thickness-Trajectory" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":185 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1531_paper.pdf", "bibtext":"@InProceedings{ Bir_HUP3D_MICCAI2024,\n author = { Birlo, Manuel and Caramalau, Razvan and Edwards, Philip J. \u201cEddie\u201d and Dromey, Brian and Clarkson, Matthew J. and Stoyanov, Danail },\n title = { { HUP-3D: A 3D multi-view synthetic dataset for assisted-egocentric hand-ultrasound-probe pose estimation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present HUP-3D, a 3D multiview multimodal synthetic dataset for hand ultrasound (US) probe pose estimation in the context of obstetric ultrasound. \nEgocentric markerless 3D joint pose estimation has potential applications in mixed reality medical education. The ability to understand hand and probe movements opens the door to tailored guidance and mentoring applications. \nOur dataset consists of over 31k sets of RGB, depth, and segmentation mask frames, including pose-related reference data, with an emphasis on image diversity and complexity. Adopting a camera viewpoint-based sphere concept allows us to capture a variety of views and generate multiple hand grasps poses using a pre-trained network. Additionally, our approach includes a software-based image rendering concept, enhancing diversity with various hand and arm textures, lighting conditions, and background images. \nWe validated our proposed dataset with state-of-the-art learning models and we obtained the lowest hand-object keypoint errors.\nThe supplementary material details the parameters for sphere-based camera view angles and the grasp generation and rendering pipeline configuration. The source code for our grasp generation and rendering pipeline, along with the dataset, is publicly available at https:\/\/manuelbirlo.github.io\/HUP-3D\/.", "title":"HUP-3D: A 3D multi-view synthetic dataset for assisted-egocentric hand-ultrasound-probe pose estimation", "authors":[ "Birlo, Manuel", "Caramalau, Razvan", "Edwards, Philip J. 
\u201cEddie\u201d", "Dromey, Brian", "Clarkson, Matthew J.", "Stoyanov, Danail" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/manuelbirlo\/US_GrabNet_grasp_generation" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":186 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2885_paper.pdf", "bibtext":"@InProceedings{ Zhe_Misaligned_MICCAI2024,\n author = { Zheng, Jieyu and Li, Xiaojian and Mo, Hangjie and Li, Ling and Ma, Xiang },\n title = { { Misaligned 3D Texture Optimization in MIS Utilizing Generative Framework } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Three-dimensional reconstruction of the surgical area based on intraoperative laparoscopic videos can restore 2D information to 3D space, providing a solid technical foundation for many applications in computer-assisted surgery. SLAM methods often suffer from imperfect pose estimation and tissue motion, leading to the loss of original texture information. On the other hand, methods like Neural Radiance Fields and 3D Gaussian Split require offline processing and lack generalization capabilities. To overcome these limitations, we explore a texture optimization method that generates high resolution and continuous texture. It designs a mechanism for transforming 3D point clouds into 2D texture space and utilizes a generative network architecture to design 2D registration and image fusion modules. Experimental results and comparisons with state-of-the-art techniques demonstrate the effectiveness of this method in preserving the high-fidelity texture.", "title":"Misaligned 3D Texture Optimization in MIS Utilizing Generative Framework", "authors":[ "Zheng, Jieyu", "Li, Xiaojian", "Mo, Hangjie", "Li, Ling", "Ma, Xiang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":187 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0787_paper.pdf", "bibtext":"@InProceedings{ Yan_AllInOne_MICCAI2024,\n author = { Yang, Zhiwen and Chen, Haowei and Qian, Ziniu and Yi, Yang and Zhang, Hui and Zhao, Dan and Wei, Bingzheng and Xu, Yan },\n title = { { All-In-One Medical Image Restoration via Task-Adaptive Routing } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Although single-task medical image restoration (MedIR) has witnessed remarkable success, the limited generalizability of these methods poses a substantial obstacle to wider application. In this paper, we focus on the task of all-in-one medical image restoration, aiming to address multiple distinct MedIR tasks with a single universal model. 
Nonetheless, due to significant differences between different MedIR tasks, training a universal model often encounters task interference issues, where different tasks with shared parameters may conflict with each other in the gradient update direction. This task interference leads to deviation of the model update direction from the optimal path, thereby affecting the model\u2019s performance. To tackle this issue, we propose a task-adaptive routing strategy, allowing conflicting tasks to select different network paths in spatial and channel dimensions, thereby mitigating task interference. Experimental results demonstrate that our proposed \\textbf{A}ll-in-one \\textbf{M}edical \\textbf{I}mage \\textbf{R}estoration (\\textbf{AMIR}) network achieves state-of-the-art performance in three MedIR tasks: MRI super-resolution, CT denoising, and PET synthesis, both in single-task and all-in-one settings. The code and data will be available at \\href{https:\/\/github.com\/Yaziwel\/All-In-One-Medical-Image-Restoration-via-Task-Adaptive-Routing.git}{https:\/\/github.com\/Yaziwel\/AMIR}.", "title":"All-In-One Medical Image Restoration via Task-Adaptive Routing", "authors":[ "Yang, Zhiwen", "Chen, Haowei", "Qian, Ziniu", "Yi, Yang", "Zhang, Hui", "Zhao, Dan", "Wei, Bingzheng", "Xu, Yan" ], "id":"Conference", "arxiv_id":"2405.19769", "GitHub":[ "https:\/\/github.com\/Yaziwel\/All-In-One-Medical-Image-Restoration-via-Task-Adaptive-Routing.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":188 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2053_paper.pdf", "bibtext":"@InProceedings{ Isl_ANovel_MICCAI2024,\n author = { Islam, Saahil and Murthy, Venkatesh N. and Neumann, Dominik and Cimen, Serkan and Sharma, Puneet and Maier, Andreas and Comaniciu, Dorin and Ghesu, Florin C. },\n title = { { A Novel Tracking Framework for Devices in X-ray Leveraging Supplementary Cue-Driven Self-Supervised Features } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"To restore proper blood flow in blocked coronary arteries via angioplasty procedure, accurate placement of devices such as catheters, balloons, and stents under live Fluoroscopy or diagnostic Angiography is crucial. Identified Balloon markers help in enhancing stent visibility in X-ray sequences, while the Catheter tip aids in precise navigation and co-registering vessel structures, reducing the need for contrast in angiography. However, accurate detection of these devices in interventional X-ray sequences faces significant challenges, particularly due to occlusions from contrasted vessels and other devices and distractions from surrounding, resulting in the failure to track such small objects. While most tracking methods rely on spatial correlation of past and current appearance, they often lack strong motion comprehension essential for navigating through these challenging conditions, and fail to effectively detect multiple instances in the scene. 
To overcome these limitations, we propose a self-supervised learning approach that enhances spatio-temporal understanding by incorporating supplementary cues and learning across multiple representation spaces on a large dataset. Following that, we introduce a generic real-time tracking framework that effectively leverages the pretrained spatio-temporal network and also takes the historical appearance and trajectory data into account. This results in enhanced localization of multiple instances of device landmarks. Our method outperforms state-of-the-art methods in interventional X-ray device tracking, especially in stability and robustness, achieving an 87% reduction in max error for balloon marker detection and a 61% reduction in max error for catheter tip detection.", "title":"A Novel Tracking Framework for Devices in X-ray Leveraging Supplementary Cue-Driven Self-Supervised Features", "authors":[ "Islam, Saahil", "Murthy, Venkatesh N.", "Neumann, Dominik", "Cimen, Serkan", "Sharma, Puneet", "Maier, Andreas", "Comaniciu, Dorin", "Ghesu, Florin C." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":189 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2125_paper.pdf", "bibtext":"@InProceedings{ Dom_Diffusion_MICCAI2024,\n author = { Dom\u00ednguez, Marina and Velikova, Yordanka and Navab, Nassir and Azampour, Mohammad Farid },\n title = { { Diffusion as Sound Propagation: Physics-inspired Model for Ultrasound Image Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning (DL) methods typically require large datasets to effectively learn data distributions. However, in the medical field, data is often limited in quantity, and acquiring labeled data can be costly. To mitigate this data scarcity, data augmentation techniques are commonly employed. Among these techniques, generative models play a pivotal role in expanding datasets. However, when it comes to ultrasound (US) imaging, the authenticity of generated data often diminishes due to the oversight of ultrasound physics.", "title":"Diffusion as Sound Propagation: Physics-inspired Model for Ultrasound Image Generation", "authors":[ "Dom\u00ednguez, Marina", "Velikova, Yordanka", "Navab, Nassir", "Azampour, Mohammad Farid" ], "id":"Conference", "arxiv_id":"2407.05428", "GitHub":[ "https:\/\/github.com\/marinadominguez\/diffusion-for-us-images" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":190 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2245_paper.pdf", "bibtext":"@InProceedings{ Li_Nonrigid_MICCAI2024,\n author = { Li, Qi and Shen, Ziyi and Yang, Qianye and Barratt, Dean C. and Clarkson, Matthew J. 
and Vercauteren, Tom and Hu, Yipeng },\n title = { { Nonrigid Reconstruction of Freehand Ultrasound without a Tracker } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Reconstructing 2D freehand Ultrasound (US) frames into 3D space without using a tracker has recently seen advances with deep learning. Predicting good frame-to-frame rigid transformations is often accepted as the learning objective, especially when the ground-truth labels from spatial tracking devices are inherently rigid transformations. Motivated by a) the observed nonrigid deformation due to soft tissue motion during scanning, and b) the highly sensitive prediction of rigid transformation, this study investigates the methods and their benefits in predicting nonrigid transformations for reconstructing 3D US. We propose a novel co-optimisation algorithm for simultaneously estimating rigid transformations among US frames, supervised by ground-truth from a tracker, and a nonrigid deformation, optimised by a regularised registration network. We show that these two objectives can be either optimised using meta-learning or combined by weighting. A fast scattered data interpolation is also developed for enabling frequent reconstruction and registration of non-parallel US frames during training. With a new data set containing over 357,000 frames in 720 scans, acquired from 60 subjects, the experiments demonstrate that, due to an expanded and thus easier-to-optimise solution space, the generalisation is improved with the added deformation estimation, with respect to the rigid ground-truth. The global pixel reconstruction error (assessing accumulative prediction) is lowered from 18.48 to 16.51 mm, compared with baseline rigid-transformation-predicting methods. Using manually identified landmarks, the proposed co-optimisation also shows potential in compensating nonrigid tissue motion at inference, which is not measurable by tracker-provided ground-truth. 
The code and data used in this paper are made publicly available at https:\/\/github.com\/QiLi111\/NR-Rec-FUS.", "title":"Nonrigid Reconstruction of Freehand Ultrasound without a Tracker", "authors":[ "Li, Qi", "Shen, Ziyi", "Yang, Qianye", "Barratt, Dean C.", "Clarkson, Matthew J.", "Vercauteren, Tom", "Hu, Yipeng" ], "id":"Conference", "arxiv_id":"2407.05767", "GitHub":[ "https:\/\/github.com\/QiLi111\/NR-Rec-FUS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":191 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2750_paper.pdf", "bibtext":"@InProceedings{ Hua_Finegrained_MICCAI2024,\n author = { Huang, Yijin and Cheng, Pujin and Tam, Roger and Tang, Xiaoying },\n title = { { Fine-grained Prompt Tuning: A Parameter and Memory Efficient Transfer Learning Method for High-resolution Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Parameter-efficient transfer learning (PETL) is proposed as a cost-effective way to transfer pre-trained models to downstream tasks, avoiding the high cost of updating entire large-scale pre-trained models (LPMs). In this work, we present Fine-grained Prompt Tuning (FPT), a novel PETL method for medical image classification. FPT significantly reduces memory consumption compared to other PETL methods, especially in high-resolution input contexts. To achieve this, we first freeze the weights of the LPM and construct a learnable lightweight side network. The frozen LPM takes high-resolution images as input to extract fine-grained features, while the side network is fed low-resolution images to reduce memory usage. To allow the side network to access pre-trained knowledge, we introduce fine-grained prompts that summarize information from the LPM through a fusion module. Important tokens selection and preloading techniques are employed to further reduce training cost and memory requirements. We evaluate FPT on four medical datasets with varying sizes, modalities, and complexities. Experimental results demonstrate that FPT achieves comparable performance to fine-tuning the entire LPM while using only 1.8% of the learnable parameters and 13% of the memory costs of an encoder ViT-B model with a 512 x 512 input resolution.", "title":"Fine-grained Prompt Tuning: A Parameter and Memory Efficient Transfer Learning Method for High-resolution Medical Image Classification", "authors":[ "Huang, Yijin", "Cheng, Pujin", "Tam, Roger", "Tang, Xiaoying" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/YijinHuang\/FPT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":192 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3416_paper.pdf", "bibtext":"@InProceedings{ Siv_LiverUSRecon_MICCAI2024,\n author = { Sivayogaraj, Kaushalya and Guruge, Sahan I. T. and Liyanage, Udari A. and Udupihille, Jeevani J. and Jayasinghe, Saroj and Fernando, Gerard M. X. 
and Rodrigo, Ranga and Liyanaarachchi, Rukshani },\n title = { { LiverUSRecon: Automatic 3D Reconstruction and Volumetry of the Liver with a Few Partial Ultrasound Scans } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"3D reconstruction of the liver for volumetry is important for qualitative analysis and disease diagnosis. Liver volumetry using ultrasound (US) scans, although advantageous due to less acquisition time and safety, is challenging due to the inherent noisiness in US scans, blurry boundaries, and partial liver visibility. We address these challenges by using the segmentation masks of a few incomplete sagittal-plane US scans of the liver in conjunction with a statistical shape model (SSM) built using a set of CT scans of the liver. We compute the shape parameters needed to warp this canonical SSM to fit the US scans through a parametric regression network. The resulting 3D liver reconstruction is accurate and leads to automatic liver volume calculation. We evaluate the accuracy of the estimated liver volumes with respect to CT segmentation volumes using RMSE. Our volume computation is statistically much closer to the volume estimated using CT scans than the volume computed using Childs\u2019 method by radiologists: p-value of 0.094 (> 0.05) says that there is no significant difference between CT segmentation volumes and ours in contrast to Childs\u2019 method. We validate our method using investigations (ablation studies) on the US image resolution, the number of CT scans used for SSM, the number of principal components, and the number of input US scans. To the best of our knowledge, this is the first automatic liver volumetry system using a few incomplete US scans given a set of CT scans of livers for SSM. Code and models are available at https:\/\/diagnostics4u.github.io\/", "title":"LiverUSRecon: Automatic 3D Reconstruction and Volumetry of the Liver with a Few Partial Ultrasound Scans", "authors":[ "Sivayogaraj, Kaushalya", "Guruge, Sahan I. T.", "Liyanage, Udari A.", "Udupihille, Jeevani J.", "Jayasinghe, Saroj", "Fernando, Gerard M. X.", "Rodrigo, Ranga", "Liyanaarachchi, Rukshani" ], "id":"Conference", "arxiv_id":"2406.19336", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":193 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1234_paper.pdf", "bibtext":"@InProceedings{ Hwa_Improving_MICCAI2024,\n author = { Hwang, Joonil and Park, Sangjoon and Park, NaHyeon and Cho, Seungryong and Kim, Jin Sung },\n title = { { Improving cone-beam CT Image Quality with Knowledge Distillation-Enhanced Diffusion Model in Imbalanced Data Settings } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"In radiation therapy (RT), the reliance on pre-treatment computed tomography (CT) images encounters challenges due to anatomical changes, necessitating adaptive planning. 
Daily cone-beam CT (CBCT) imaging, pivotal for therapy adjustment, falls short in tissue density accuracy. To address this, our innovative approach integrates diffusion models for CT image generation, offering precise control over data synthesis. Leveraging a self-training method with knowledge distillation, we maximize the use of CBCT data during therapy, complemented by sparse paired fan-beam CTs. This strategy, incorporated into state-of-the-art diffusion-based models, surpasses conventional methods like Pix2pix and CycleGAN. A meticulously curated dataset of 2800 paired CBCT and CT scans, supplemented by 4200 CBCT scans, undergoes preprocessing and teacher model training, including the Brownian Bridge Diffusion Model (BBDM). Pseudo-label CT images are generated, resulting in a dataset combining 5600 CT images with corresponding CBCT images. Thorough evaluation using MSE, SSIM, PSNR and LPIPS demonstrates superior performance against Pix2pix and CycleGAN. Our approach shows promise in generating high-quality CT images from CBCT scans in RT.", "title":"Improving cone-beam CT Image Quality with Knowledge Distillation-Enhanced Diffusion Model in Imbalanced Data Settings", "authors":[ "Hwang, Joonil", "Park, Sangjoon", "Park, NaHyeon", "Cho, Seungryong", "Kim, Jin Sung" ], "id":"Conference", "arxiv_id":"2409.12539", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":194 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2857_paper.pdf", "bibtext":"@InProceedings{ Zha_Deeplearningbased_MICCAI2024,\n author = { Zhang, Yi and Zhao, Yidong and Huang, Lu and Xia, Liming and Tao, Qian },\n title = { { Deep-learning-based groupwise registration for motion correction of cardiac T1 mapping } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Quantitative $T_1$ mapping by MRI is an increasingly important tool for clinical assessment of cardiovascular diseases. The cardiac $T_1$ map is derived by fitting a known signal model to a series of baseline images, while the quality of this map can be degraded by involuntary respiratory and cardiac motion. To correct motion, a template image is often needed to register all baseline images, but the choice of template is nontrivial, leading to inconsistent performance sensitive to image contrast. In this work, we propose a novel deep-learning-based groupwise registration framework, which omits the need for a template, and registers all baseline images simultaneously. We design two groupwise losses for this registration framework: the first is a linear principal component analysis (PCA) loss that enforces alignment of baseline images irrespective of the intensity variation, and the second is an auxiliary relaxometry loss that enforces adherence of intensity profile to the signal model. We extensively evaluated our method, termed ``PCA-Relax\u2019\u2019, and other baseline methods on an in-house cardiac MRI dataset including both pre- and post-contrast $T_1$ sequences. All methods were evaluated under three distinct training-and-evaluation strategies, namely, standard, one-shot, and test-time-adaptation. 
The proposed PCA-Relax showed further improved performance of registration and mapping over well-established baselines. The proposed groupwise framework is generic and can be adapted to applications involving multiple images.", "title":"Deep-learning-based groupwise registration for motion correction of cardiac T1 mapping", "authors":[ "Zhang, Yi", "Zhao, Yidong", "Huang, Lu", "Xia, Liming", "Tao, Qian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":195 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2884_paper.pdf", "bibtext":"@InProceedings{ Xia_IMGGCN_MICCAI2024,\n author = { Xia, Jing and Chan, Yi Hao and Girish, Deepank and Rajapakse, Jagath C. },\n title = { { IMG-GCN: Interpretable Modularity-Guided Structure-Function Interactions Learning for Brain Cognition and Disorder Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Brain structure-function interaction is crucial for cognition and brain disorder analysis, and it is inherently more complex than a simple region-to-region coupling. It exhibits homogeneity at the modular level, with regions of interest (ROIs) within the same module showing more similar neural mechanisms than those across modules. Leveraging modular-level guidance to capture complex structure-function interactions is essential, but such studies are still scarce. Therefore, we propose an interpretable modularity-guided graph convolution network (IMG-GCN) to extract the structure-function interactions across ROIs and highlight the most discriminative interactions relevant to fluid cognition and Parkinson\u2019s disease (PD). Specifically, we design a modularity-guided interactive network that defines modularity-specific convolution operation to learn interactions between structural and functional ROIs according to modular homogeneity. Then, an MLP-based attention model is introduced to identify the most contributed interactions. The interactions are inserted as edges linking structural and functional ROIs to construct a unified combined graph, and GCN is applied for final tasks. Experiments on HCP and PPMI datasets indicate that our proposed method outperforms state-of-the-art multi-model methods in fluid cognition prediction and PD classification. The attention maps reveal that the frontoparietal and default mode structures interacting with visual function are discriminative for fluid cognition, while the subcortical structures interacting with widespread functional modules are associated with PD.", "title":"IMG-GCN: Interpretable Modularity-Guided Structure-Function Interactions Learning for Brain Cognition and Disorder Analysis", "authors":[ "Xia, Jing", "Chan, Yi Hao", "Girish, Deepank", "Rajapakse, Jagath C." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":196 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2230_paper.pdf", "bibtext":"@InProceedings{ Son_DINOReg_MICCAI2024,\n author = { Song, Xinrui and Xu, Xuanang and Yan, Pingkun },\n title = { { DINO-Reg: General Purpose Image Encoder for Training-free Multi-modal Deformable Medical Image Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Existing medical image registration algorithms rely on either dataset-specific training or local texture-based features to align images. The former cannot be reliably implemented without large modality-specific training datasets, while the latter lacks global semantics and thus could be easily trapped at local minima. In this paper, we present a training-free deformable image registration method, DINO-Reg, leveraging the general purpose image encoder for image feature extraction. The DINOv2 encoder was trained using the ImageNet data containing natural images, but the encoder\u2019s ability to capture semantic information is generalizable even to unseen domains. We present a training-free deep learning-based deformable medical image registration framework based on the DINOv2 encoder. With such semantically rich features, our method can achieve accurate coarse-to-fine registration through simple feature pairing and conventional gradient descent optimization. We conducted a series of experiments to understand the behavior and role of such a general purpose image encoder in the application of image registration. Our method shows state-of-the-art performance in multiple registration datasets. To our knowledge, this is the first application of general vision foundation models in medical image registration.", "title":"DINO-Reg: General Purpose Image Encoder for Training-free Multi-modal Deformable Medical Image Registration", "authors":[ "Song, Xinrui", "Xu, Xuanang", "Yan, Pingkun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":197 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3680_paper.pdf", "bibtext":"@InProceedings{ Stu_SynCellFactory_MICCAI2024,\n author = { Sturm, Moritz and Cerrone, Lorenzo and Hamprecht, Fred A. },\n title = { { SynCellFactory: Generative Data Augmentation for Cell Tracking } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cell tracking remains a pivotal yet challenging task in biomedical research. The full potential of deep learning for this purpose is often untapped due to the limited availability of comprehensive and varied training data sets. 
In this paper, we present SynCellFactory, a generative method for cell video augmentation.\nAt the heart of SynCellFactory lies the ControlNet architecture, which has been fine-tuned to synthesize cell imagery with photorealistic accuracy in style and motion patterns. This technique enables the creation of synthetic, annotated cell videos that mirror the complexity of authentic microscopy time-lapses.\nOur experiments demonstrate that SynCellFactory boosts the performance of well-established deep learning models for cell tracking, particularly when original training data is sparse.", "title":"SynCellFactory: Generative Data Augmentation for Cell Tracking", "authors":[ "Sturm, Moritz", "Cerrone, Lorenzo", "Hamprecht, Fred A." ], "id":"Conference", "arxiv_id":"2404.16421", "GitHub":[ "https:\/\/github.com\/sciai-lab\/SynCellFactory" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":198 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2943_paper.pdf", "bibtext":"@InProceedings{ Ram_Geometric_MICCAI2024,\n author = { Ramesh, Jayroop and Dinsdale, Nicola and Yeung, Pak-Hei and Namburete, Ana I. L. },\n title = { { Geometric Transformation Uncertainty for Improving 3D Fetal Brain Pose Prediction from Freehand 2D Ultrasound Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurately localizing two-dimensional (2D) ultrasound (US) fetal brain images in the 3D brain, using minimal computational resources, is an important task for automated US analysis of fetal growth and development. We propose an uncertainty-aware deep learning model for automated 3D plane localization in 2D fetal brain images. Specifically, a multi-head network is trained to jointly regress 3D plane pose from 2D images in terms of different geometric transformations. The model explicitly learns to predict uncertainty to allocate higher weight to inputs with low variances across different transformations to improve performance. Our proposed method, QAERTS, demonstrates superior pose estimation accuracy than the state-of-the-art and most of the uncertainty-based approaches, leading to 9% improvement on plane angle (PA) for localization accuracy, and 8% on normalized cross-correlation (NCC) for sampled image quality. QAERTS also demonstrates efficiency, containing 5\u00d7 fewer parameters than ensemble-based approach, making it advantageous in resource-constrained settings. In addition, QAERTS proves to be more robust to noise effects observed in freehand US scanning by leveraging rotational discontinuities and explicit output uncertainties.", "title":"Geometric Transformation Uncertainty for Improving 3D Fetal Brain Pose Prediction from Freehand 2D Ultrasound Videos", "authors":[ "Ramesh, Jayroop", "Dinsdale, Nicola", "Yeung, Pak-Hei", "Namburete, Ana I. L." 
], "id":"Conference", "arxiv_id":"2405.13235", "GitHub":[ "https:\/\/github.com\/jayrmh\/QAERTS.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":199 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3191_paper.pdf", "bibtext":"@InProceedings{ Sae_SurvRNC_MICCAI2024,\n author = { Saeed, Numan and Ridzuan, Muhammad and Maani, Fadillah Adamsyah and Alasmawi, Hussain and Nandakumar, Karthik and Yaqub, Mohammad },\n title = { { SurvRNC: Learning Ordered Representations for Survival Prediction using Rank-N-Contrast } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Predicting the likelihood of survival is of paramount importance for individuals diagnosed with cancer as it provides invaluable information regarding prognosis at an early stage. This knowledge enables the formulation of effective treatment plans that lead to improved patient outcomes. In the past few years, deep learning models have provided a feasible solution for assessing medical images, electronic health records, and genomic data to estimate cancer risk scores. However, these models often fall short of their potential because they struggle to learn regression-aware feature representations. In this study, we propose Survival Rank-N-Contrast (SurvRNC) method, which introduces a loss function as a regularizer to obtain an ordered representation based on the survival times. This function can handle censored data and can be incorporated into any survival model to ensure that the learned representation is ordinal. The model was extensively evaluated on a HEad & NeCK TumOR (HECKTOR) segmentation and the outcome-prediction task dataset. We demonstrate that using the SurvRNC method for training can achieve higher performance on different deep survival models. Additionally, it outperforms state-of-the-art methods by 3.6% on the concordance index. 
The code is publicly available at https:\/\/github.com\/numanai\/SurvRNC.", "title":"SurvRNC: Learning Ordered Representations for Survival Prediction using Rank-N-Contrast", "authors":[ "Saeed, Numan", "Ridzuan, Muhammad", "Maani, Fadillah Adamsyah", "Alasmawi, Hussain", "Nandakumar, Karthik", "Yaqub, Mohammad" ], "id":"Conference", "arxiv_id":"2403.10603", "GitHub":[ "https:\/\/github.com\/numanai\/SurvRNC" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":200 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0192_paper.pdf", "bibtext":"@InProceedings{ An_SubjectAdaptive_MICCAI2024,\n author = { An, Sion and Kang, Myeongkyun and Kim, Soopil and Chikontwe, Philip and Shen, Li and Park, Sang Hyun },\n title = { { Subject-Adaptive Transfer Learning Using Resting State EEG Signals for Cross-Subject EEG Motor Imagery Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Electroencephalography (EEG) motor imagery (MI) classification is a fundamental, yet challenging task due to the variation of signals between individuals i.e., inter-subject variability. Previous approaches try to mitigate this using task-specific (TS) EEG signals from the target subject in training. However, recording TS EEG signals requires time and limits its applicability in various fields. In contrast, resting state (RS) EEG signals are a viable alternative due to ease of acquisition with rich subject information. In this paper, we propose a novel subject-adaptive transfer learning strategy that utilizes RS EEG signals to adapt models on unseen subject data. Specifically, we disentangle extracted features into task- and subject-dependent features and use them to calibrate RS EEG signals for obtaining task information while preserving subject characteristics. The calibrated signals are then used to adapt the model to the target subject, enabling the model to simulate processing TS EEG signals of the target subject. The proposed method achieves state-of-the-art accuracy on three public benchmarks, demonstrating the effectiveness of our method in cross-subject EEG MI classification. Our findings highlight the potential of leveraging RS EEG signals to advance practical brain-computer interface systems. 
The code is available at https:\/\/github.com\/SionAn\/MICCAI2024-ResTL.", "title":"Subject-Adaptive Transfer Learning Using Resting State EEG Signals for Cross-Subject EEG Motor Imagery Classification", "authors":[ "An, Sion", "Kang, Myeongkyun", "Kim, Soopil", "Chikontwe, Philip", "Shen, Li", "Park, Sang Hyun" ], "id":"Conference", "arxiv_id":"2405.19346", "GitHub":[ "https:\/\/github.com\/SionAn\/MICCAI2024-ResTL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":201 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1844_paper.pdf", "bibtext":"@InProceedings{ Zen_Reciprocal_MICCAI2024,\n author = { Zeng, Qingjie and Lu, Zilin and Xie, Yutong and Lu, Mengkang and Ma, Xinke and Xia, Yong },\n title = { { Reciprocal Collaboration for Semi-supervised Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"To acquire information from unlabeled data, current semi-supervised methods are mainly developed based on the mean-teacher or co-training paradigm, with non-controversial optimization objectives so as to regularize the discrepancy in learning towards consistency. \nHowever, these methods suffer from the consensus issue, where the learning process might devolve into vanilla self-training due to identical learning targets. \nTo address this issue, we propose a novel \\textbf{Re}ciprocal \\textbf{Co}llaboration model (ReCo) for semi-supervised medical image classification. \nReCo is composed of a main network and an auxiliary network, which are constrained by distinct while latently consistent objectives. On labeled data, the main network learns from the ground truth acquiescently, while simultaneously generating auxiliary labels utilized as the supervision for the auxiliary network. Specifically, given a labeled image, the auxiliary label is defined as the category with the second-highest classification score predicted by the main network, thus symbolizing the most likely mistaken classification. Hence, the auxiliary network is specifically designed to discern \\emph{which category the image should \\textbf{NOT} belong to}. On unlabeled data, cross pseudo supervision is applied using reversed predictions. Furthermore, feature embeddings are purposefully regularized under the guidance of contrary predictions, with the aim of differentiating between categories susceptible to misclassification.\nWe evaluate our approach on two public benchmarks. 
Our results demonstrate the superiority of ReCo, which consistently outperforms popular competitors and sets a new state of the art.", "title":"Reciprocal Collaboration for Semi-supervised Medical Image Classification", "authors":[ "Zeng, Qingjie", "Lu, Zilin", "Xie, Yutong", "Lu, Mengkang", "Ma, Xinke", "Xia, Yong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":202 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0187_paper.pdf", "bibtext":"@InProceedings{ Li_Comprehensive_MICCAI2024,\n author = { Li, Wei and Zhang, Jingyang and Heng, Pheng-Ann and Gu, Lixu },\n title = { { Comprehensive Generative Replay for Task-Incremental Segmentation with Concurrent Appearance and Semantic Forgetting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Generalist segmentation models are increasingly favored for diverse tasks involving various objects from different image sources. Task-Incremental Learning (TIL) offers a privacy-preserving training paradigm using tasks arriving sequentially, instead of gathering them due to strict data sharing policies. However, the task evolution can span a wide scope that involves shifts in both image appearance and segmentation semantics with intricate correlation, causing concurrent appearance and semantic forgetting. To solve this issue, we propose a Comprehensive Generative Replay (CGR) framework that restores appearance and semantic knowledge by synthesizing image-mask pairs to mimic past task data, which focuses on two aspects: modeling image-mask correspondence and promoting scalability for diverse tasks. Specifically, we introduce a novel Bayesian Joint Diffusion (BJD) model for high-quality synthesis of image-mask pairs with their correspondence explicitly preserved by conditional denoising. Furthermore, we develop a Task-Oriented Adapter (TOA) that recalibrates prompt embeddings to modulate the diffusion model, making the data synthesis compatible with different tasks. Experiments on incremental tasks (cardiac, fundus and prostate segmentation) show its clear advantage for alleviating concurrent appearance and semantic forgetting. Code is available at https:\/\/github.com\/jingyzhang\/CGR.", "title":"Comprehensive Generative Replay for Task-Incremental Segmentation with Concurrent Appearance and Semantic Forgetting", "authors":[ "Li, Wei", "Zhang, Jingyang", "Heng, Pheng-Ann", "Gu, Lixu" ], "id":"Conference", "arxiv_id":"2406.19796", "GitHub":[ "https:\/\/github.com\/jingyzhang\/CGR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":203 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2336_paper.pdf", "bibtext":"@InProceedings{ Aya_UnWaveNet_MICCAI2024,\n author = { Ayad, Ishak and Tarpau, C\u00e9cilia and Cebeiro, Javier and Nguyen, Ma\u00ef K. 
},\n title = { { UnWave-Net: Unrolled Wavelet Network for Compton Tomography Image Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Computed tomography (CT) is a widely used medical imaging technique to scan internal structures of a body, typically involving collimation and mechanical rotation. Compton scatter tomography (CST) presents an interesting alternative to conventional CT by leveraging Compton physics instead of collimation to gather information from multiple directions. While CST introduces new imaging opportunities with several advantages such as high sensitivity, compactness, and entirely fixed systems, image reconstruction remains an open problem due to the mathematical challenges of CST modeling. In contrast, deep unrolling networks have demonstrated potential in CT image reconstruction, despite their computationally intensive nature. In this study, we investigate the efficiency of unrolling networks for CST image reconstruction. To address the important computational cost required for training, we propose UnWave-Net, a novel unrolled wavelet-based reconstruction network. This architecture includes a non-local regularization term based on wavelets, which captures long-range dependencies within images and emphasizes the multi-scale components of the wavelet transform. We evaluate our approach using a CST of circular geometry which stays completely static during data acquisition, where UnWave-Net facilitates image reconstruction in the absence of a specific reconstruction formula. Our method outperforms existing approaches and achieves state-of-the-art performance in terms of SSIM and PSNR, and offers an improved computational efficiency compared to traditional unrolling networks.", "title":"UnWave-Net: Unrolled Wavelet Network for Compton Tomography Image Reconstruction", "authors":[ "Ayad, Ishak", "Tarpau, C\u00e9cilia", "Cebeiro, Javier", "Nguyen, Ma\u00ef K." ], "id":"Conference", "arxiv_id":"2406.03413", "GitHub":[ "https:\/\/github.com\/Ishak96\/UnWave-Net" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":204 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2479_paper.pdf", "bibtext":"@InProceedings{ Shu_SlideGCD_MICCAI2024,\n author = { Shu, Tong and Shi, Jun and Sun, Dongdong and Jiang, Zhiguo and Zheng, Yushan },\n title = { { SlideGCD: Slide-based Graph Collaborative Training with Knowledge Distillation for Whole Slide Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Existing WSI analysis methods lie on the consensus that histopathological characteristics of tumors are significant guidance for cancer diagnostics. Particularly, as the evolution of cancers is a continuous process, the correlations and differences across various stages, anatomical locations and patients should be taken into account. 
However, recent research mainly focuses on the inner-contextual information in a single WSI, ignoring the correlations between slides. To verify whether introducing the slide inter-correlations can bring improvements to WSI representation learning, we propose a generic WSI analysis pipeline, SlideGCD, that considers the existing multi-instance learning (MIL) methods as the backbone and recasts the WSI classification task as a node classification problem. More specifically, SlideGCD declares a node buffer that stores previous slide embeddings for subsequent extensive slide-based graph construction and conducts graph learning to explore the inter-correlations implied in the slide-based graph. Moreover, we frame the MIL classifier and graph learning into two parallel workflows and deploy knowledge distillation to transfer the differentiable information to the graph neural network. Consistent performance gains brought by SlideGCD to four previous state-of-the-art MIL methods are observed on two TCGA benchmark datasets.", "title":"SlideGCD: Slide-based Graph Collaborative Training with Knowledge Distillation for Whole Slide Image Classification", "authors":[ "Shu, Tong", "Shi, Jun", "Sun, Dongdong", "Jiang, Zhiguo", "Zheng, Yushan" ], "id":"Conference", "arxiv_id":"2407.08968", "GitHub":[ "https:\/\/github.com\/HFUT-miaLab\/SlideGCD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":205 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2986_paper.pdf", "bibtext":"@InProceedings{ Bai_CrossPhase_MICCAI2024,\n author = { Bai, Bizhe and Zhou, Yan-Jie and Hu, Yujian and Mok, Tony C. W. and Xiang, Yilang and Lu, Le and Zhang, Hongkun and Xu, Minfeng },\n title = { { Cross-Phase Mutual Learning Framework for Pulmonary Embolism Identification on Non-Contrast CT Scans } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Pulmonary embolism (PE) is a life-threatening condition where rapid and accurate diagnosis is imperative yet difficult due to predominantly atypical symptomatology. Computed tomography pulmonary angiography (CTPA) is acknowledged as the gold standard imaging tool in clinics, yet it can be contraindicated for emergency department (ED) patients and represents an onerous procedure, thus necessitating PE identification through non-contrast CT (NCT) scans. In this work, we explore the feasibility of applying a deep-learning approach to NCT scans for PE identification. We propose a novel Cross-Phase Mutual learNing framework (CPMN) that fosters knowledge transfer from CTPA to NCT, while concurrently conducting embolism segmentation and abnormality classification in a multi-task manner. The proposed CPMN leverages the Inter-Feature Alignment (IFA) strategy that enhances spatial contiguity and mutual learning between the dual-pathway network, while the Intra-Feature Discrepancy (IFD) strategy can facilitate precise segmentation of PE against complex backgrounds for single-pathway networks. For a comprehensive assessment of the proposed approach, a large-scale dual-phase dataset containing 334 PE patients and 1,105 normal subjects has been established.
Experimental results demonstrate that CPMN achieves the leading identification performance, which is 95.4% and 99.6% in patient-level sensitivity and specificity on NCT scans, indicating the potential of our approach as an economical, accessible, and precise tool for PE identification in clinical practice.", "title":"Cross-Phase Mutual Learning Framework for Pulmonary Embolism Identification on Non-Contrast CT Scans", "authors":[ "Bai, Bizhe", "Zhou, Yan-Jie", "Hu, Yujian", "Mok, Tony C. W.", "Xiang, Yilang", "Lu, Le", "Zhang, Hongkun", "Xu, Minfeng" ], "id":"Conference", "arxiv_id":"2407.11529", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":206 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1643_paper.pdf", "bibtext":"@InProceedings{ Jun_DeformationAware_MICCAI2024,\n author = { Jung, Sunyoung and Choi, Yoonseok and Al-masni, Mohammed A. and Jung, Minyoung and Kim, Dong-Hyun },\n title = { { Deformation-Aware Segmentation Network Robust to Motion Artifacts for Brain Tissue Segmentation using Disentanglement Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Motion artifacts caused by prolonged acquisition time are a significant challenge in Magnetic Resonance Imaging (MRI), hindering accurate tissue segmentation. These artifacts appear as blurred images that mimic tissue-like appearances, making segmentation difficult. This study proposes a novel deep learning framework that demonstrates superior performance in both motion correction and robust brain tissue segmentation in the presence of artifacts. The core concept lies in a complementary process: a disentanglement learning network progressively removes artifacts, leading to cleaner images and consequently, more accurate segmentation by a jointly trained motion estimation and segmentation network. This network generates three outputs: a motion-corrected image, a motion deformation map that identifies artifact-affected regions, and a brain tissue segmentation mask. This deformation serves as a guidance mechanism for the disentanglement process, aiding the model in recovering lost information or removing artificial structures introduced by the artifacts. Extensive in-vivo experiments on pediatric motion data demonstrate that our proposed framework outperforms state-of-the-art methods in segmenting motion-corrupted MRI scans. 
The code is available at https:\/\/github.com\/SunYJ-hxppy\/Multi-Net.", "title":"Deformation-Aware Segmentation Network Robust to Motion Artifacts for Brain Tissue Segmentation using Disentanglement Learning", "authors":[ "Jung, Sunyoung", "Choi, Yoonseok", "Al-masni, Mohammed A.", "Jung, Minyoung", "Kim, Dong-Hyun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/SunYJ-hxppy\/Multi-Net" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":207 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2333_paper.pdf", "bibtext":"@InProceedings{ Gao_MEDBind_MICCAI2024,\n author = { Gao, Yuan and Kim, Sangwook and Austin, David E and McIntosh, Chris },\n title = { { MEDBind: Unifying Language and Multimodal Medical Data Embeddings } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical vision-language pretraining models (VLPM) have achieved remarkable progress in fusing chest X-rays (CXR) with clinical texts, introducing image-text data binding approaches that enable zero-shot learning and downstream clinical tasks. However, the current landscape lacks the holistic integration of additional medical modalities, such as electrocardiograms (ECG). We present MEDBind (Medical Electronic patient recorD Bind), which learns joint embeddings across CXR, ECG, and text. Using text data as the central anchor, MEDBind features tri-modality binding, delivering competitive performance in top-K retrieval, zero-shot, and few-shot benchmarks against established VLPM, and the ability for CXR-to-ECG zero-shot classification and retrieval. This seamless integration is achieved by combining contrastive loss on modality-text pairs with our proposed contrastive loss function, Edge-Modality Contrastive Loss, fostering a cohesive embedding space for CXR, ECG, and text. 
Finally, we demonstrate that MEDBind can improve downstream tasks by directly integrating CXR and ECG embeddings into a large-language model for multimodal prompt tuning.", "title":"MEDBind: Unifying Language and Multimodal Medical Data Embeddings", "authors":[ "Gao, Yuan", "Kim, Sangwook", "Austin, David E", "McIntosh, Chris" ], "id":"Conference", "arxiv_id":"2403.12894", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":208 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3719_paper.pdf", "bibtext":"@InProceedings{ Che_Disentangled_MICCAI2024,\n author = { Cheng, Jiale and Wu, Zhengwang and Yuan, Xinrui and Wang, Li and Lin, Weili and Grewen, Karen and Li, Gang },\n title = { { Disentangled Hybrid Transformer for Identification of Infants with Prenatal Drug Exposure } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Prenatal drug exposure, which occurs during a time of extraordinary and critical brain development, is typically associated with cognitive, behavioral, and physiological deficits during infancy, childhood, and adolescence. Early identifying infants with prenatal drug exposures and associated biomarkers using neuroimages can help inform earlier, more effective, and personalized interventions to greatly improve later cognitive outcomes. To this end, we propose a novel deep learning model called disentangled hybrid volume-surface transformer for identifying individual infants with prenatal drug exposures. Specifically, we design two distinct branches, a volumetric network for learning non-cortical features in 3D image space, and a surface network for learning features on the highly convoluted cortical surface manifold. To better capture long-range dependency and generate highly discriminative representations, image and surface transformers are respectively employed for the volume and surface branches. Then, a disentanglement strategy is further proposed to separate the representations from two branches into complementary variables and common variables, thus removing redundant information and boosting expressive capability. After that, the disentangled representations are concatenated to a classifier to determine if there is an existence of prenatal drug exposures. 
We have validated our method on 210 infant MRI scans and demonstrated its superior performance, compared to ablated models and state-of-the-art methods.", "title":"Disentangled Hybrid Transformer for Identification of Infants with Prenatal Drug Exposure", "authors":[ "Cheng, Jiale", "Wu, Zhengwang", "Yuan, Xinrui", "Wang, Li", "Lin, Weili", "Grewen, Karen", "Li, Gang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":209 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1746_paper.pdf", "bibtext":"@InProceedings{ Chu_Anatomicconstrained_MICCAI2024,\n author = { Chu, Yuetan and Yang, Changchun and Luo, Gongning and Qiu, Zhaowen and Gao, Xin },\n title = { { Anatomic-constrained Medical Image Synthesis via Physiological Density Sampling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Despite substantial progress in utilizing deep learning methods for clinical diagnosis, their efficacy depends on sufficient annotated data, which is often of limited availability owing to the extensive manual efforts required for labeling. Although prevalent data synthesis techniques can mitigate such data scarcity, they risk generating outputs with distorted anatomy that poorly represent real-world data. We address this challenge through a novel integration of anatomically constrained synthesis with registration uncertainty-based refinement, termed Anatomic-Constrained medical image Synthesis (ACIS). Specifically, we (1) generate the pseudo-mask via the physiological density estimation and Voronoi tessellation to represent the spatial anatomical information as the image synthesis prior; (2) synthesize diverse yet realistic image-annotation pairs guided by the pseudo-masks, and (3) refine the outputs by registration uncertainty estimation to encourage the anatomical consistency between synthesized and real-world images. We validate ACIS for improving performance in both segmentation and image reconstruction tasks for few-shot learning. Experiments across diverse datasets demonstrate that ACIS outperforms state-of-the-art image synthesis techniques and enables models trained on only 10% or less of the total training data to achieve comparable or superior performance to that of models trained on complete datasets.
The source code is publicly available at https:\/\/github.com\/Arturia-Pendragon-Iris\/VonoroiGeneration.", "title":"Anatomic-constrained Medical Image Synthesis via Physiological Density Sampling", "authors":[ "Chu, Yuetan", "Yang, Changchun", "Luo, Gongning", "Qiu, Zhaowen", "Gao, Xin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":210 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0495_paper.pdf", "bibtext":"@InProceedings{ Zha_IHCSurv_MICCAI2024,\n author = { Zhang, Yejia and Chao, Hanqing and Qiu, Zhongwei and Liu, Wenbin and Shen, Yixuan and Sapkota, Nishchal and Gu, Pengfei and Chen, Danny Z. and Lu, Le and Yan, Ke and Jin, Dakai and Bian, Yun and Jiang, Hui },\n title = { { IHCSurv: Effective Immunohistochemistry Priors for Cancer Survival Analysis in Gigapixel Multi-stain Whole Slide Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent cancer survival prediction approaches have made great\nstrides in analyzing H&E-stained gigapixel whole-slide images. However, methods targeting the immunohistochemistry (IHC) modality remain largely unexplored. We remedy this methodological gap and propose IHCSurv, a new framework that leverages IHC-specific priors to improve downstream survival prediction. We use these priors to guide our model to the most prognostic tissue regions and simultaneously enrich local features. To address drawbacks in recent approaches related to limited spatial context and cross-regional relation modeling, we propose a spatially-constrained spectral clustering algorithm that preserves spatial context alongside an efficient tissue region encoder that facilitates information transfer across tissue regions both within and between images. 
We evaluate our framework on a multi-stain IHC dataset of pancreatic cancer patients, where IHCSurv markedly outperforms existing state-of-the-art survival prediction methods.", "title":"IHCSurv: Effective Immunohistochemistry Priors for Cancer Survival Analysis in Gigapixel Multi-stain Whole Slide Images", "authors":[ "Zhang, Yejia", "Chao, Hanqing", "Qiu, Zhongwei", "Liu, Wenbin", "Shen, Yixuan", "Sapkota, Nishchal", "Gu, Pengfei", "Chen, Danny Z.", "Lu, Le", "Yan, Ke", "Jin, Dakai", "Bian, Yun", "Jiang, Hui" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/charzharr\/miccai24-ihcsurv" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":211 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0074_paper.pdf", "bibtext":"@InProceedings{ Liu_UinTSeg_MICCAI2024,\n author = { Liu, Jiameng and Liu, Feihong and Sun, Kaicong and Sun, Yuhang and Huang, Jiawei and Jiang, Caiwen and Rekik, Islem and Shen, Dinggang },\n title = { { UinTSeg: Unified Infant Brain Tissue Segmentation with Anatomy Delineation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate brain tissue segmentation is a vital prerequisite for charting infant brain development and for diagnosing early brain disorders. However, due to inherently ongoing myelination and maturation, the intensity distributions of gray matter (GM) and white matter (WM) on T1-weighted (T1w) data undergo substantial variations in intensity from the neonatal period to 24 months. Especially at the ages around 6 months, the intensity distributions of GM and WM are highly overlapped. These physiological phenomena pose great challenges for automatic infant brain tissue segmentation, even for expert radiologists. To address these issues, in this study, we present a unified infant brain tissue segmentation (UinTSeg) framework to accurately segment brain tissues of infants aged 0-24 months using a single model. UinTSeg comprises two stages: 1) boundary extraction and 2) tissue segmentation. In the first stage, to alleviate the difficulty of tissue segmentation caused by variations in intensity, we extract the intensity-invariant tissue boundaries from T1w data driven by edge maps extracted from the Sobel filter. In the second stage, the Sobel edge maps and extracted boundaries of GM, WM, and cerebrospinal fluid (CSF) are utilized as intensity-invariant anatomy information to ensure unified and accurate tissue segmentation for infants aged 0-24 months. Both stages are built upon an attention-based surrounding-aware segmentation network (ASNet), which exploits the contextual information from multi-scale patches to improve the segmentation performance.
Extensive experiments on the Baby Connectome Project dataset demonstrate the superiority of our proposed framework over five state-of-the-art methods.", "title":"UinTSeg: Unified Infant Brain Tissue Segmentation with Anatomy Delineation", "authors":[ "Liu, Jiameng", "Liu, Feihong", "Sun, Kaicong", "Sun, Yuhang", "Huang, Jiawei", "Jiang, Caiwen", "Rekik, Islem", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":212 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0481_paper.pdf", "bibtext":"@InProceedings{ Liu_Overlay_MICCAI2024,\n author = { Liu, Jiacheng and Qian, Wenhua and Cao, Jinde and Liu, Peng },\n title = { { Overlay Mantle-Free for Semi-Supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised medical image segmentation, crucial for medical research, enhances model generalization using unlabeled data with minimal labeled data. Current methods face edge uncertainty and struggle to learn specific shapes from pixel classification alone. To address these issues, we propose a two-stage knowledge distillation approach that employs a teacher model to distill information from labeled data, enhancing the student model with unlabeled data. In the first stage, we use true labels to augment data and sharpen target edges to make teacher predictions more confident. In the second stage, we freeze the teacher model parameters to generate pseudo labels for unlabeled data and guide the student model to learn. By feeding the original background image to the teacher and the enhanced image to the student, the student model learns the information hidden under the mantle and the overall shape of hidden information of the segmented target. Experimental results on the Left Atrium dataset show that our method surpasses existing methods. Our overlay mantle-free training method enables segmentation based on learned shape information even in data loss scenarios, exhibiting improved edge segmentation accuracy.
The code is available at https:\/\/github.com\/vigilliu\/OMF.", "title":"Overlay Mantle-Free for Semi-Supervised Medical Image Segmentation", "authors":[ "Liu, Jiacheng", "Qian, Wenhua", "Cao, Jinde", "Liu, Peng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/vigilliu\/OMF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":213 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0067_paper.pdf", "bibtext":"@InProceedings{ Zen_Missing_MICCAI2024,\n author = { Zeng, Zhilin and Peng, Zelin and Yang, Xiaokang and Shen, Wei },\n title = { { Missing as Masking: Arbitrary Cross-modal Feature Reconstruction for Incomplete Multimodal Brain Tumor Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automatic brain tumor segmentation using multimodal MRI images is a critical task in medical imaging. A complete set of multimodal MRI images for a subject offers comprehensive views of brain tumors, and thus providing ideal tumor segmentation performance. However, acquiring such modality-complete data for every subject is frequently impractical in clinical practice, which requires a segmentation model to be able to 1) flexibly leverage both modality-complete and modality-incomplete data for model training, and 2) prevent significant performance degradation in inference if certain modalities are missing. To meet these two demands, in this paper, we propose M$^3$FeCon (\\textbf{M}issing as \\textbf{M}asking: arbitrary cross-\\textbf{M}odal \\textbf{Fe}ature Re\\textbf{Con}struction) for incomplete multimodal brain tumor segmentation, which can learn approximate modality-complete feature representations from modality-incomplete data. Specifically, we treat missing modalities also as masked modalities, and employ a strategy similar to Masked Autoencoder (MAE) to learn feature-to-feature reconstruction across arbitrary modality combinations. The reconstructed features for missing modalities act as supplements to form approximate modality-complete feature representations. 
Extensive evaluations on the BraTS18 dataset demonstrate that our method achieves state-of-the-art performance in brain tumor segmentation with incomplete modalities, especially in enhancing tumor, with a 4.61\\% improvement in terms of Dice score.", "title":"Missing as Masking: Arbitrary Cross-modal Feature Reconstruction for Incomplete Multimodal Brain Tumor Segmentation", "authors":[ "Zeng, Zhilin", "Peng, Zelin", "Yang, Xiaokang", "Shen, Wei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":214 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1810_paper.pdf", "bibtext":"@InProceedings{ Luo_Textual_MICCAI2024,\n author = { Luo, Yuanjiang and Li, Hongxiang and Wu, Xuan and Cao, Meng and Huang, Xiaoshuang and Zhu, Zhihong and Liao, Peixi and Chen, Hu and Zhang, Yi },\n title = { { Textual Inversion and Self-supervised Refinement for Radiology Report Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Existing mainstream approaches follow the encoder-decoder paradigm for generating radiology reports. They focus on improving the network structure of encoders and decoders, which leads to two shortcomings: overlooking the modality gap and ignoring report content constraints. In this paper, we propose Textual Inversion and Self-supervised Refinement (TISR) to address the above two issues. Specifically, textual inversion can project text and image into the same space by representing images as pseudo words to eliminate the cross-modeling gap. Subsequently, self-supervised refinement refines these pseudo words through contrastive loss computation between images and texts, enhancing the fidelity of generated reports to images. Notably, TISR is orthogonal to most existing methods and is plug-and-play. We conduct experiments on two widely-used public datasets and achieve significant improvements on various baselines, which demonstrates the effectiveness and generalization of TISR.
The code will be available soon.", "title":"Textual Inversion and Self-supervised Refinement for Radiology Report Generation", "authors":[ "Luo, Yuanjiang", "Li, Hongxiang", "Wu, Xuan", "Cao, Meng", "Huang, Xiaoshuang", "Zhu, Zhihong", "Liao, Peixi", "Chen, Hu", "Zhang, Yi" ], "id":"Conference", "arxiv_id":"2405.20607", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":215 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1632_paper.pdf", "bibtext":"@InProceedings{ Den_Enable_MICCAI2024,\n author = { Deng, Zhipeng and Luo, Luyang and Chen, Hao },\n title = { { Enable the Right to be Forgotten with Federated Client Unlearning in Medical Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"The right to be forgotten, as stated in most data regulations, poses an underexplored challenge in federated learning (FL), leading to the development of federated unlearning (FU). However, current FU approaches often face trade-offs between efficiency, model performance, forgetting efficacy, and privacy preservation. In this paper, we delve into the paradigm of Federated Client Unlearning to guarantee a client the right to erase its contribution or influence, introducing the first FU framework in medical imaging. In the unlearning process of a client, the proposed Model-Contrastive Unlearning marks a pioneering step towards feature-level unlearning, and Frequency-Guided Memory Preservation ensures smooth forgetting of local knowledge while maintaining the generalizability of the trained global model, thus avoiding performance compromises and guaranteeing rapid post-training. We evaluate our FCU framework on two public medical image datasets, including intracranial hemorrhage diagnosis and skin lesion diagnosis, demonstrating that our proposed framework outperforms other state-of-the-art FU frameworks, with an expected speed-up of 10-15 times compared with retraining from scratch.
The code and organized datasets will be made public.", "title":"Enable the Right to be Forgotten with Federated Client Unlearning in Medical Imaging", "authors":[ "Deng, Zhipeng", "Luo, Luyang", "Chen, Hao" ], "id":"Conference", "arxiv_id":"2407.02356", "GitHub":[ "https:\/\/github.com\/dzp2095\/FCU" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":216 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0638_paper.pdf", "bibtext":"@InProceedings{ Yan_DCrownFormer_MICCAI2024,\n author = { Yang, Su and Han, Jiyong and Lim, Sang-Heon and Yoo, Ji-Yong and Kim, SuJeong and Song, Dahyun and Kim, Sunjung and Kim, Jun-Min and Yi, Won-Jin },\n title = { { DCrownFormer: Morphology-aware Point-to-Mesh Generation Transformer for Dental Crown Prosthesis from 3D Scan Data of Antagonist and Preparation Teeth } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Dental prosthesis is important in designing artificial replacements to restore the function and appearance of teeth. However, designing a patient-specific dental prosthesis is still labor-intensive and depends on dental professionals with knowledge of oral anatomy and their experience. Also, the initial tooth template for designing dental crowns is not personalized. In this paper, we propose a novel point-to-mesh generation transformer (DCrownFormer) to directly and efficiently generate dental crown meshes from point inputs of 3D scans of antagonist and preparation teeth. Specifically, to learn morphological relationships between a point input and generated points of a dental crown, we introduce a morphology-aware cross-attention module (MCAM) in a transformer decoder and curvature-penalty loss (CPL). Furthermore, we adopt Differentiable Poisson surface reconstruction for mesh reconstruction from generated points and normals of a dental crown by directly optimizing an indicator function using mesh reconstruction loss (MRL). Experimental results demonstrate the superiority of DCrownFormer compared with other methods, by improving morphological details of occlusal surfaces such as dental grooves and cusps. We further validate the effectiveness of MCAM and MRL, and the significant benefits of CPL through ablation studies.
The code is available at https:\/\/github.com\/suyang93\/DCrownFormer\/.", "title":"DCrownFormer: Morphology-aware Point-to-Mesh Generation Transformer for Dental Crown Prosthesis from 3D Scan Data of Antagonist and Preparation Teeth", "authors":[ "Yang, Su", "Han, Jiyong", "Lim, Sang-Heon", "Yoo, Ji-Yong", "Kim, SuJeong", "Song, Dahyun", "Kim, Sunjung", "Kim, Jun-Min", "Yi, Won-Jin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":217 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1664_paper.pdf", "bibtext":"@InProceedings{ Dai_SaSaMIM_MICCAI2024,\n author = { Dai, Pengyu and Ou, Yafei and Yang, Yuqiao and Liu, Dichao and Hashimoto, Masahiro and Jinzaki, Masahiro and Miyake, Mototaka and Suzuki, Kenji },\n title = { { SaSaMIM: Synthetic Anatomical Semantics-Aware Masked Image Modeling for Colon Tumor Segmentation in Non-contrast Abdominal Computed Tomography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Colorectal cancer (CRC) is a critical global concern. Despite advancements in computer-aided techniques, the development of early-stage computer-aided segmentation holds substantial clinical potential and warrants further exploration. This can be attributed to the challenge of localizing tumor-related information within the colonic region of the abdomen during segmentation, and to the fact that cancerous tissue remains indistinguishable from surrounding tissue even with contrast enhancement. In this work, a task-oriented Synthetic anatomical Semantics-aware Masked Image Modeling (SaSaMIM) method is proposed that leverages both existing and synthesized semantics for efficient utilization of unlabeled data. We first introduce a novel fine-grain synthetic mask modeling strategy that effectively integrates coarse organ semantics and synthetic tumor semantics in a label-free manner. Thus, tumor location perception in the pretraining phase is achieved by means of integrating both semantics. Next, a frequency-aware decoding branch is designed to achieve further supervision and representation of the Gaussian noise-based tumor semantics. Since the CT intensity of tumors follows a Gaussian distribution, representation in the frequency domain solves the difficulty in distinguishing cancerous tissues from surrounding healthy tissues due to their homogeneity. To demonstrate the proposed method\u2019s performance, a non-contrast CT (NCCT) colon cancer dataset was assembled, aiming at early tumor diagnosis in a broader clinical setting. We validate our approach on a cross-validation of these 110 cases and outperform the current SOTA self-supervised method by a 5% Dice score improvement on average. Comprehensive experiments have confirmed the efficacy of our proposed method.
To our knowledge, this is the first study to apply task-oriented self-supervised learning methods on NCCT to achieve end-to-end early-stage colon tumor segmentation.", "title":"SaSaMIM: Synthetic Anatomical Semantics-Aware Masked Image Modeling for Colon Tumor Segmentation in Non-contrast Abdominal Computed Tomography", "authors":[ "Dai, Pengyu", "Ou, Yafei", "Yang, Yuqiao", "Liu, Dichao", "Hashimoto, Masahiro", "Jinzaki, Masahiro", "Miyake, Mototaka", "Suzuki, Kenji" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Da1daidaidai\/SaSaMIM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":218 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3032_paper.pdf", "bibtext":"@InProceedings{ Yu_Material_MICCAI2024,\n author = { Yu, Xiaopeng and Wu, Qianyu and Qin, Wenhui and Zhong, Tao and Su, Mengqing and Ma, Jinglu and Zhang, Yikun and Ji, Xu and Quan, Guotao and Chen, Yang and Du, Yanfeng and Lai, Xiaochun },\n title = { { Material Decomposition in Photon-Counting CT: A Deep Learning Approach Driven by Detector Physics and ASIC Modeling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Photon-counting computed tomography (PCCT) based on photon-counting detectors (PCDs) stands out as a cutting-edge CT technology, offering enhanced spatial resolution, reduced radiation dose, and advanced material decomposition capabilities. Despite its recognized advantages, challenges arise from real-world phenomena such as PCD charge-sharing effects, application-specific integrated circuit (ASIC) pile-up, and spectrum shift, introducing a disparity between actual physical effects and the assumptions made in ideal physics models. This misalignment can lead to substantial errors during image reconstruction processes, particularly in material decomposition. In this paper, we introduce a novel detector physics and ASIC model-guided deep learning system model tailored for PCCT. This model adeptly captures the comprehensive response of the PCCT system, encompassing both detector and ASIC responses. We present experimental results demonstrating the model\u2019s exceptional accuracy and robustness. Key advancements include reduced calibration errors, enhanced quality in material decomposition imaging, and improved quantitative consistency. 
This model represents a significant stride in bridging the gap between theoretical assumptions and practical complexities of PCCT, paving the way for more precise and reliable medical imaging.", "title":"Material Decomposition in Photon-Counting CT: A Deep Learning Approach Driven by Detector Physics and ASIC Modeling", "authors":[ "Yu, Xiaopeng", "Wu, Qianyu", "Qin, Wenhui", "Zhong, Tao", "Su, Mengqing", "Ma, Jinglu", "Zhang, Yikun", "Ji, Xu", "Quan, Guotao", "Chen, Yang", "Du, Yanfeng", "Lai, Xiaochun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":219 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1656_paper.pdf", "bibtext":"@InProceedings{ Li_EndoSelf_MICCAI2024,\n author = { Li, Wenda and Hayashi, Yuichiro and Oda, Masahiro and Kitasaka, Takayuki and Misawa, Kazunari and Mori, Kensaku },\n title = { { EndoSelf: Self-Supervised Monocular 3D Scene Reconstruction of Deformable Tissues with Neural Radiance Fields on Endoscopic Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Neural radiance field has recently emerged as a powerful representation to reconstruct deformable tissues from endoscopic videos. Previous methods mainly focus on depth-supervised approaches based on endoscopic datasets. As additional information, depth values were proven important in reconstructing deformable tissues by previous methods. However, collecting a large number of datasets with accurate depth values limits the applicability of these approaches for endoscopic scenes.\nTo address this issue, we propose a novel self-supervised monocular 3D scene reconstruction method based on neural radiance fields without prior depth as supervision. We consider the monocular 3D reconstruction based on two approaches: ray-tracing-based neural radiance fields and structure-from-motion-based photogrammetry. We introduce a structure-from-motion framework and leverage color values as supervision to complete the self-supervised learning strategy. In addition, we predict the depth values from neural radiance fields and enforce the geometric constraint for depth values from adjacent views. Moreover, we propose a looped loss function to fully explore the temporal correlation between input images. The experimental results showed that the proposed method without prior depth outperformed the previous depth-supervised methods on two endoscopic datasets.
Our code is available.", "title":"EndoSelf: Self-Supervised Monocular 3D Scene Reconstruction of Deformable Tissues with Neural Radiance Fields on Endoscopic Videos", "authors":[ "Li, Wenda", "Hayashi, Yuichiro", "Oda, Masahiro", "Kitasaka, Takayuki", "Misawa, Kazunari", "Mori, Kensaku" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MoriLabNU\/EndoSelf" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":220 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1415_paper.pdf", "bibtext":"@InProceedings{ Tan_HFResDiff_MICCAI2024,\n author = { Tang, Zixin and Jiang, Caiwen and Cui, Zhiming and Shen, Dinggang },\n title = { { HF-ResDiff: High-Frequency-guided Residual Diffusion for Multi-dose PET Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Positron Emission Tomography (PET), an advanced nuclear imaging technology capable of visualizing human biological processes, plays an irreplaceable role in diagnosing various diseases. Nonetheless, PET imaging necessitates the administration of radionuclides into the human body, inevitably leading to radiation exposure. To mitigate the risk, many studies seek to reconstruct high-quality standard-dose PET from low-dose PET to reduce the required dosage of radionuclides. However, these efforts perform poorly in capturing high-frequency details in images. Meanwhile, they are limited to single-dose PET reconstruction, overlooking a clinical fact: due to inherent individual variations among patients, the actual dose level of PET images obtained can exhibit considerable discrepancies. In this paper, we propose a multi-dose PET reconstruction framework that aligns closely with clinical requirements and effectively preserves high-frequency information. Specifically, we design a High-Frequency-guided Residual Diffusion for Multi-dose PET Reconstruction (HF-ResDiff) that enhances traditional diffusion models by 1) employing a simple CNN to predict low-frequency content, allowing the diffusion model to focus more on high-frequency counterparts while significantly promoting the training efficiency, 2) incorporating a Frequency Domain Information Separator and a High-frequency-guided Cross-attention to further assist the diffusion model in accurately recovering high-frequency details, and 3) embedding a dose control module to enable the diffusion model to accommodate PET reconstruction at different dose levels. 
Through extensive experiments, our HF-ResDiff outperforms the state-of-the-art methods in PET reconstruction across multiple doses.", "title":"HF-ResDiff: High-Frequency-guided Residual Diffusion for Multi-dose PET Reconstruction", "authors":[ "Tang, Zixin", "Jiang, Caiwen", "Cui, Zhiming", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":221 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1466_paper.pdf", "bibtext":"@InProceedings{ Zha_Whole_MICCAI2024,\n author = { Zhang, Yundi and Chen, Chen and Shit, Suprosanna and Starck, Sophie and Rueckert, Daniel and Pan, Jiazhen },\n title = { { Whole Heart 3D+T Representation Learning Through Sparse 2D Cardiac MR Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cardiac Magnetic Resonance (CMR) imaging serves as the gold-standard for evaluating cardiac morphology and function. Typically, a multi-view CMR stack, covering short-axis (SA) and 2\/3\/4-chamber long-axis (LA) views, is acquired for a thorough cardiac assessment. However, efficiently streamlining the complex, high-dimensional 3D+T CMR data and distilling compact, coherent representation remains a challenge. In this work, we introduce a whole-heart self-supervised learning framework that utilizes masked imaging modeling to automatically uncover the correlations between spatial and temporal patches throughout the cardiac stacks. This process facilitates the generation of meaningful and well-clustered heart representations without relying on the traditionally required, and often costly, labeled data. The learned heart representation can be directly used for various downstream tasks. Furthermore, our method demonstrates remarkable robustness, ensuring consistent representations even when certain CMR planes are missing\/flawed. We train our model on 14,000 unlabeled CMR data from UK BioBank and evaluate it on 1,000 annotated data. The proposed method demonstrates superior performance to baselines in tasks that demand comprehensive 3D+T cardiac information, e.g. cardiac phenotype (ejection fraction and ventricle volume) prediction and multi-plane\/multi-frame CMR segmentation, highlighting its effectiveness in extracting comprehensive cardiac features that are both anatomically and pathologically relevant. 
The code is available at https:\/\/github.com\/Yundi-Zhang\/WholeHeartRL.git.", "title":"Whole Heart 3D+T Representation Learning Through Sparse 2D Cardiac MR Images", "authors":[ "Zhang, Yundi", "Chen, Chen", "Shit, Suprosanna", "Starck, Sophie", "Rueckert, Daniel", "Pan, Jiazhen" ], "id":"Conference", "arxiv_id":"2406.00329", "GitHub":[ "https:\/\/github.com\/Yundi-Zhang\/WholeHeartRL.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":222 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0511_paper.pdf", "bibtext":"@InProceedings{ Pug_Enhancing_MICCAI2024,\n author = { Puglisi, Lemuel and Alexander, Daniel C. and Rav\u00ec, Daniele },\n title = { { Enhancing Spatiotemporal Disease Progression Models via Latent Diffusion and Prior Knowledge } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this work, we introduce Brain Latent Progression (BrLP), a novel spatiotemporal disease progression model based on latent diffusion. BrLP is designed to predict the evolution of diseases at the individual level on 3D brain MRIs. Existing deep generative models developed for this task are primarily data-driven and face challenges in learning disease progressions. BrLP addresses these challenges by incorporating prior knowledge from disease models to enhance the accuracy of predictions. To implement this, we propose to integrate an auxiliary model that infers volumetric changes in various brain regions. Additionally, we introduce Latent Average Stabilization (LAS), a novel technique to improve spatiotemporal consistency of the predicted progression. BrLP is trained and evaluated on a large dataset comprising 11,730 T1-weighted brain MRIs from 2,805 subjects, collected from three publicly available, longitudinal Alzheimer\u2019s Disease (AD) studies. In our experiments, we compare the MRI scans generated by BrLP with the actual follow-up MRIs available from the subjects, in both cross-sectional and longitudinal settings. BrLP demonstrates significant improvements over existing methods, with an increase of 22% in volumetric accuracy across AD-related brain regions and 43% in image similarity to the ground-truth scans. The ability of BrLP to generate conditioned 3D scans at the subject level, along with the novelty of integrating prior knowledge to enhance accuracy, represents a significant advancement in disease progression modeling, opening new avenues for precision medicine. 
The code of BrLP is available at the following link: https:\/\/github.com\/LemuelPuglisi\/BrLP.", "title":"Enhancing Spatiotemporal Disease Progression Models via Latent Diffusion and Prior Knowledge", "authors":[ "Puglisi, Lemuel", "Alexander, Daniel C.", "Rav\u00ec, Daniele" ], "id":"Conference", "arxiv_id":"2405.03328", "GitHub":[ "https:\/\/github.com\/LemuelPuglisi\/BrLP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":223 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1110_paper.pdf", "bibtext":"@InProceedings{ Mor_Topological_MICCAI2024,\n author = { Morlana, Javier and Tard\u00f3s, Juan D. and Montiel, Jos\u00e9 M. M. },\n title = { { Topological SLAM in colonoscopies leveraging deep features and topological priors } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"We introduce ColonSLAM, a system that combines classical multiple-map metric SLAM with deep features and topological priors to create topological maps of the whole colon. The SLAM pipeline by itself is able to create disconnected individual metric submaps representing locations from short video subsections of the colon, but is not able to merge covisible submaps due to deformations and the limited performance of the SIFT descriptor in the medical domain. ColonSLAM is guided by topological priors and combines a deep localization network trained to distinguish if two images come from the same place or not and the soft verification of a transformer-based matching network, being able to relate far-in-time submaps during an exploration, grouping them in nodes imaging the same colon place, building more complex maps than any other approach in the literature. We demonstrate our approach in the Endomapper dataset, showing its potential for producing maps of the whole colon in real human explorations. Code and models are available at: https:\/\/github.com\/endomapper\/ColonSLAM", "title":"Topological SLAM in colonoscopies leveraging deep features and topological priors", "authors":[ "Morlana, Javier", "Tard\u00f3s, Juan D.", "Montiel, Jos\u00e9 M. M." ], "id":"Conference", "arxiv_id":"2409.16806", "GitHub":[ "https:\/\/github.com\/endomapper\/ColonSLAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":224 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2366_paper.pdf", "bibtext":"@InProceedings{ Ceb_Vesselaware_MICCAI2024,\n author = { Ceballos-Arroyo, Alberto M. and Nguyen, Hieu T. and Zhu, Fangrui and Yadav, Shrikanth M. 
and Kim, Jisoo and Qin, Lei and Young, Geoffrey and Jiang, Huaizu },\n title = { { Vessel-aware aneurysm detection using multi-scale deformable 3D attention } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Manual detection of intracranial aneurysms in computed tomography (CT) scans is a complex, time-consuming task even for expert clinicians, and automating the process is no less challenging. Critical difficulties associated with detecting aneurysms include their small (yet varied) size compared to scans and a high potential for false positive (FP) predictions. To address these issues, we propose a 3D, multi-scale neural architecture that detects aneurysms via a deformable attention mechanism that operates on vessel distance maps derived from vessel segmentations and 3D features extracted from the layers of a convolutional network. Likewise, we reformulate aneurysm segmentation as bounding cuboid prediction using binary cross entropy and three localization losses (location, size, IoU). Given three validation sets comprised of 152\/138\/38 CT scans and containing 126\/101\/58 aneurysms, we achieved a Sensitivity of 91.3%\/97.0%\/74.1% @ FP rates 0.53\/0.56\/0.87, with Sensitivity\naround 80% on small aneurysms. Manual inspection of outputs by experts showed our model only tends to miss aneurysms located in unusual locations. Code and model weights are available online.", "title":"Vessel-aware aneurysm detection using multi-scale deformable 3D attention", "authors":[ "Ceballos-Arroyo, Alberto M.", "Nguyen, Hieu T.", "Zhu, Fangrui", "Yadav, Shrikanth M.", "Kim, Jisoo", "Qin, Lei", "Young, Geoffrey", "Jiang, Huaizu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/alceballosa\/deform-aneurysm-detection" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":225 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2127_paper.pdf", "bibtext":"@InProceedings{ Don_Prompt_MICCAI2024,\n author = { Dong, Zijian and Wu, Yilei and Chen, Zijiao and Zhang, Yichi and Jin, Yueming and Zhou, Juan Helen },\n title = { { Prompt Your Brain: Scaffold Prompt Tuning for Efficient Adaptation of fMRI Pre-trained Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"We introduce Scaffold Prompt Tuning (ScaPT), a novel prompt-based framework for adapting large-scale functional magnetic resonance imaging (fMRI) pre-trained models to downstream tasks, with high parameter efficiency and improved performance compared to fine-tuning and baselines for prompt tuning. The full fine-tuning updates all pre-trained parameters, which may distort the learned feature space and lead to overfitting with limited training data which is common in fMRI fields. In contrast, we design a hierarchical prompt structure that transfers the knowledge learned from high-resource tasks to low-resource ones. 
This structure, equipped with a Deeply-conditioned Input-Prompt (DIP) mapping module, allows for efficient adaptation by updating only 2% of the trainable parameters. The framework enhances semantic interpretability through attention mechanisms between inputs and prompts, and it clusters prompts in the latent space in alignment with prior knowledge. Experiments on public resting state fMRI datasets reveal ScaPT outperforms fine-tuning and multitask-based prompt tuning in neurodegenerative diseases diagnosis\/prognosis and personality trait prediction, even with fewer than 20 participants. It highlights ScaPT\u2019s efficiency in adapting pre-trained fMRI models to low-resource tasks.", "title":"Prompt Your Brain: Scaffold Prompt Tuning for Efficient Adaptation of fMRI Pre-trained Model", "authors":[ "Dong, Zijian", "Wu, Yilei", "Chen, Zijiao", "Zhang, Yichi", "Jin, Yueming", "Zhou, Juan Helen" ], "id":"Conference", "arxiv_id":"2408.10567", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":226 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3515_paper.pdf", "bibtext":"@InProceedings{ Nih_Estimation_MICCAI2024,\n author = { Nihalaani, Rachaell and Kataria, Tushar and Adams, Jadie and Elhabian, Shireen Y. },\n title = { { Estimation and Analysis of Slice Propagation Uncertainty in 3D Anatomy Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Supervised methods for 3D anatomy segmentation demonstrate superior performance but are often limited by the availability of annotated data. This limitation has led to a growing interest in self-supervised approaches in tandem with the abundance of available un-annotated data. Slice propagation has recently emerged as a self-supervised approach that leverages slice registration as a self-supervised task to achieve full anatomy segmentation with minimal supervision. This approach significantly reduces the need for domain expertise, time, and the cost associated with building fully annotated datasets required for training segmentation networks. However, this shift toward reduced supervision via deterministic networks raises concerns about the trustworthiness and reliability of predictions, especially when compared with more accurate supervised approaches. To address this concern, we propose the\nintegration of calibrated uncertainty quantification (UQ) into slice propagation methods, providing insights into the model\u2019s predictive reliability and confidence levels. Incorporating uncertainty measures enhances user confidence in self-supervised approaches, thereby improving their practical applicability. We conducted experiments on three datasets for 3D abdominal segmentation using five different UQ methods. The results illustrate that incorporating UQ not only improves model trustworthiness, but also segmentation accuracy. Furthermore, our analysis reveals various failure modes of slice propagation methods that might not be immediately apparent to end-users. 
This opens up new research avenues to improve the accuracy and trustworthiness of slice propagation methods.", "title":"Estimation and Analysis of Slice Propagation Uncertainty in 3D Anatomy Segmentation", "authors":[ "Nihalaani, Rachaell", "Kataria, Tushar", "Adams, Jadie", "Elhabian, Shireen Y." ], "id":"Conference", "arxiv_id":"2403.12290", "GitHub":[ "https:\/\/github.com\/RachaellNihalaani\/SlicePropUQ" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":227 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1627_paper.pdf", "bibtext":"@InProceedings{ Liu_SwinUMamba_MICCAI2024,\n author = { Liu, Jiarun and Yang, Hao and Zhou, Hong-Yu and Xi, Yan and Yu, Lequan and Li, Cheng and Liang, Yong and Shi, Guangming and Yu, Yizhou and Zhang, Shaoting and Zheng, Hairong and Wang, Shanshan },\n title = { { Swin-UMamba: Mamba-based UNet with ImageNet-based pretraining } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate medical image segmentation demands the integration of multi-scale information, spanning from local features to global dependencies. However, it is challenging for existing methods to model long-range global information, where convolutional neural networks are constrained by their local receptive fields, and vision transformers suffer from high quadratic complexity of their attention mechanism. Recently, Mamba-based models have gained great attention for their impressive ability in long sequence modeling. Several studies have demonstrated that these models can outperform popular vision models in various tasks, offering higher accuracy, lower memory consumption, and less computational burden. However, existing Mamba-based models are mostly trained from scratch and do not explore the power of pretraining, which has been proven to be quite effective for data-efficient medical image analysis. This paper introduces a novel Mamba-based model, Swin-UMamba, designed specifically for medical image segmentation tasks, leveraging the advantages of ImageNet-based pretraining. Our experimental results reveal the vital role of ImageNet-based training in enhancing the performance of Mamba-based models. Swin-UMamba demonstrates superior performance by a large margin compared to CNNs, ViTs, and the latest Mamba-based models. Notably, on AbdomenMRI, Endoscopy, and Microscopy datasets, Swin-UMamba outperforms its closest counterpart U-Mamba by an average score of 2.72%. 
The code and models of Swin-UMamba are publicly available at: https:\/\/github.com\/JiarunLiu\/Swin-UMamba.", "title":"Swin-UMamba: Mamba-based UNet with ImageNet-based pretraining", "authors":[ "Liu, Jiarun", "Yang, Hao", "Zhou, Hong-Yu", "Xi, Yan", "Yu, Lequan", "Li, Cheng", "Liang, Yong", "Shi, Guangming", "Yu, Yizhou", "Zhang, Shaoting", "Zheng, Hairong", "Wang, Shanshan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/JiarunLiu\/Swin-UMamba" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":228 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3995_paper.pdf", "bibtext":"@InProceedings{ Tap_SuperField_MICCAI2024,\n author = { Tapp, Austin and Zhao, Can and Roth, Holger R. and Tanedo, Jeffrey and Anwar, Syed Muhammad and Bourke, Niall J. and Hajnal, Joseph V. and Nankabirwa, Victoria and Deoni, Sean and Lepore, Natasha and Linguraru, Marius George },\n title = { { Super-Field MRI Synthesis for Infant Brains Enhanced by Dual Channel Latent Diffusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"In resource-limited settings, portable ultra-low-field (uLF, i.e., 0.064T) magnetic resonance imaging (MRI) systems expand accessibility of radiological scanning, particularly for low-income areas as well as underserved populations like neonates and infants. However, compared to high-field (HF, e.g., \u2265 1.5T) systems, inferior image quality in uLF scanning poses challenges for research and clinical use. To address this, we introduce Super-Field Network (SFNet), a custom swinUNETRv2 with generative adversarial network components that uses uLF MRIs to generate super-field (SF) images comparable to HF MRIs. We acquired a cohort of infant data (n=30, aged 0-2 years) with paired uLF-HF MRI data from a resource-limited setting with an underrepresented population in research. To enhance the small dataset, we present a novel use of latent diffusion to create dual-channel (uLF-HF) paired MRIs. We compare SFNet with state-of-the-art synthesis methods by HF-SF image similarity perceptual scores and by automated HF and SF segmentations of white matter (WM), gray matter (GM), and cerebrospinal fluid (CSF). The best performance was achieved by SFNet trained on the latent diffusion enhanced dataset yielding state-of-the-art results in Fr\u00e9chet inception distance at 9.08 \u00b1 1.21, perceptual similarity at 0.11 \u00b1 0.01, and PSNR at 22.64 \u00b1 1.31. True HF and SF segmentations had a strong overlap with Dice similarity coefficients of 0.71 \u00b1 0.1, 0.79 \u00b1 0.2, and 0.73 \u00b1 0.08 for WM, GM, and CSF, respectively, in the developing infant brain with incomplete myelination, and displayed 166%, 107%, and 106% improvement over respective uLF-based segmentation metrics. SF MRI supports health equity by enhancing the clinical use of uLF imaging systems and improving the diagnostic capabilities of low-cost portable MRI systems in resource-limited settings and for underserved populations. 
Our code is made openly available at https:\/\/github.com\/AustinTapp\/SFnet.", "title":"Super-Field MRI Synthesis for Infant Brains Enhanced by Dual Channel Latent Diffusion", "authors":[ "Tapp, Austin", "Zhao, Can", "Roth, Holger R.", "Tanedo, Jeffrey", "Anwar, Syed Muhammad", "Bourke, Niall J.", "Hajnal, Joseph V.", "Nankabirwa, Victoria", "Deoni, Sean", "Lepore, Natasha", "Linguraru, Marius George" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/AustinTapp\/SFnet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":229 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1232_paper.pdf", "bibtext":"@InProceedings{ Wan_EndoGSLAM_MICCAI2024,\n author = { Wang, Kailing and Yang, Chen and Wang, Yuehao and Li, Sikuang and Wang, Yan and Dou, Qi and Yang, Xiaokang and Shen, Wei },\n title = { { EndoGSLAM: Real-Time Dense Reconstruction and Tracking in Endoscopic Surgeries using Gaussian Splatting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Precise camera tracking, high-fidelity 3D tissue reconstruction, and real-time online visualization are critical for intrabody medical imaging devices such as endoscopes and capsule robots. However, existing SLAM (Simultaneous Localization and Mapping) methods often struggle to achieve both complete high-quality surgical field reconstruction and efficient computation, restricting their intraoperative applications among endoscopic surgeries. In this paper, we introduce EndoGSLAM, an efficient SLAM approach for endoscopic surgeries, which integrates streamlined Gaussian representation and differentiable rasterization to facilitate over 100 fps rendering speed during online camera tracking and tissue reconstructing. Extensive experiments show that EndoGSLAM achieves a better trade-off between intraoperative availability and reconstruction quality than traditional or neural SLAM approaches, showing tremendous potential for endoscopic surgeries.", "title":"EndoGSLAM: Real-Time Dense Reconstruction and Tracking in Endoscopic Surgeries using Gaussian Splatting", "authors":[ "Wang, Kailing", "Yang, Chen", "Wang, Yuehao", "Li, Sikuang", "Wang, Yan", "Dou, Qi", "Yang, Xiaokang", "Shen, Wei" ], "id":"Conference", "arxiv_id":"2403.15124", "GitHub":[ "https:\/\/github.com\/Loping151\/EndoGSLAM" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.15124", "n_linked_authors":0, "upvotes":1, "num_comments":0, "n_authors":8, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":230 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0448_paper.pdf", "bibtext":"@InProceedings{ Liu_TaggedtoCine_MICCAI2024,\n author = { Liu, Xiaofeng and Xing, Fangxu and Bian, Zhangxing and Arias-Vergara, Tomas and Pe\u0301rez-Toro, Paula Andrea and Maier, Andreas and Stone, Maureen and Zhuo, Jiachen and Prince, Jerry L. 
and Woo, Jonghye },\n title = { { Tagged-to-Cine MRI Sequence Synthesis via Light Spatial-Temporal Transformer } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Tagged magnetic resonance imaging (MRI) has been successfully used to track the motion of internal tissue points within moving organs. Typically, to analyze motion using tagged MRI, cine MRI data in the same coordinate system are acquired, incurring additional time and costs. Consequently, tagged-to-cine MR synthesis holds the potential to reduce the extra acquisition time and costs associated with cine MRI, without disrupting downstream motion analysis tasks. Previous approaches have processed each frame independently, thereby overlooking the fact that complementary information from occluded regions of the tag patterns could be present in neighboring frames exhibiting motion. Furthermore, the inconsistent visual appearance, e.g., tag fading, across frames can reduce synthesis performance. To address this, we propose an efficient framework for tagged-to-cine MR sequence synthesis, leveraging both spatial and temporal information with relatively limited data. Specifically, we follow a split-and-integral protocol to balance spatial-temporal modeling efficiency and consistency. The light spatial-temporal transformer (LiST$^2$) is designed to exploit the local and global attention in motion sequence with relatively lightweight training parameters. The directional product relative position-time bias is adapted to make the model aware of the spatial-temporal correlation, while the shifted window is used for motion alignment. Then, a recurrent sliding fine-tuning (ReST) scheme is applied to further enhance the temporal consistency. Our framework is evaluated on paired tagged and cine MRI sequences, demonstrating superior performance over comparison methods.", "title":"Tagged-to-Cine MRI Sequence Synthesis via Light Spatial-Temporal Transformer", "authors":[ "Liu, Xiaofeng", "Xing, Fangxu", "Bian, Zhangxing", "Arias-Vergara, Tomas", "Pe\u0301rez-Toro, Paula Andrea", "Maier, Andreas", "Stone, Maureen", "Zhuo, Jiachen", "Prince, Jerry L.", "Woo, Jonghye" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":231 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0960_paper.pdf", "bibtext":"@InProceedings{ Li_Textmatch_MICCAI2024,\n author = { Li, Aibing and Zeng, Xinyi and Zeng, Pinxian and Ding, Sixian and Wang, Peng and Wang, Chengdi and Wang, Yan },\n title = { { Textmatch: Using Text Prompts to Improve Semi-supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised learning, a paradigm involving training models with limited labeled data alongside abundant unlabeled images, has significantly advanced medical image segmentation. 
However, the absence of label supervision introduces noise during training, posing a challenge in achieving a well-clustered feature space essential for acquiring discriminative representations in segmentation tasks. In this context, the emergence of vision-language (VL) models in natural image processing has showcased promising capabilities in aiding object localization through the utilization of text prompts, demonstrating potential as an effective solution for addressing annotation scarcity. Building upon this insight, we present Textmatch, a novel framework that leverages text prompts to enhance segmentation performance in semi-supervised medical image segmentation. Specifically, our approach introduces a Bilateral Prompt Decoder (BPD) to address modal discrepancies between visual and linguistic features, facilitating the extraction of complementary information from multi-modal data. Then, we propose the Multi-views Consistency Regularization (MCR) strategy to ensure consistency among multiple views derived from perturbations in both image and text domains, reducing the impact of noise and generating more reliable pseudo-labels. Furthermore, we leverage these pseudo-labels and conduct Pseudo-Label Guided Contrastive Learning (PGCL) in the feature space to encourage intra-class aggregation and inter-class separation between features and prototypes, thus enhancing the generation of more discriminative representations for segmentation. Extensive experiments on two publicly available datasets demonstrate that our framework outperforms previous methods employing image-only and multi-modal approaches, establishing a new state-of-the-art performance.", "title":"Textmatch: Using Text Prompts to Improve Semi-supervised Medical Image Segmentation", "authors":[ "Li, Aibing", "Zeng, Xinyi", "Zeng, Pinxian", "Ding, Sixian", "Wang, Peng", "Wang, Chengdi", "Wang, Yan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":232 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1608_paper.pdf", "bibtext":"@InProceedings{ She_DCDiff_MICCAI2024,\n author = { Shen, Ruochong and Li, Xiaoxu and Li, Yuan-Fang and Sui, Chao and Peng, Yu and Ke, Qiuhong },\n title = { { DCDiff: Dual-Domain Conditional Diffusion for CT Metal Artifact Reduction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Metallic implants in X-ray Computed Tomography (CT) scans can lead to undesirable artifacts, adversely affecting the quality of images and, consequently, the effectiveness of clinical treatment. Metal Artifact Reduction (MAR) is essential for improving diagnostic accuracy, yet this task is challenging due to the uncertainty associated with the affected regions. In this paper, inspired by the capabilities of diffusion models in generating high-quality images, we present a novel MAR framework termed Dual-Domain Conditional Diffusion (DCDiff). 
Specifically, our DCDiff takes dual-domain information as the input conditions for generating clean images: 1) the image domain incorporating the raw CT image and the filtered back projection (FBP) output of the metal trace, and 2) the sinogram domain achieved with a new diffusion interpolation algorithm. Experimental results demonstrate that our DCDiff outperforms state-of-the-art methods, showcasing its effectiveness for MAR.", "title":"DCDiff: Dual-Domain Conditional Diffusion for CT Metal Artifact Reduction", "authors":[ "Shen, Ruochong", "Li, Xiaoxu", "Li, Yuan-Fang", "Sui, Chao", "Peng, Yu", "Ke, Qiuhong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":233 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1667_paper.pdf", "bibtext":"@InProceedings{ Mat_Learning_MICCAI2024,\n author = { Matsuo, Shinnosuke and Suehiro, Daiki and Uchida, Seiichi and Ito, Hiroaki and Terada, Kazuhiro and Yoshizawa, Akihiko and Bise, Ryoma },\n title = { { Learning from Partial Label Proportions for Whole Slide Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper, we address the segmentation of tumor subtypes in whole slide images (WSI) by utilizing incomplete label proportions. Specifically, we utilize \u2018partial\u2019 label proportions, which give the proportions among tumor subtypes but do not give the proportion between tumor and non-tumor. Partial label proportions are recorded as the standard diagnostic information by pathologists, and we, therefore, want to use them for realizing the segmentation model that can classify each WSI patch into one of the tumor subtypes or non-tumor. We call this problem \u2018learning from partial label proportions (LPLP)\u2019 and formulate the problem as a weakly supervised learning problem. Then, we propose an efficient algorithm for this challenging problem by decomposing it into two weakly supervised learning subproblems: multiple instance learning (MIL) and learning from label proportions (LLP). These subproblems are optimized efficiently in an end-to-end manner. The effectiveness of our algorithm is demonstrated through experiments conducted on two WSI datasets. 
This code is available at https:\/\/github.com\/matsuo-shinnosuke\/LPLP.", "title":"Learning from Partial Label Proportions for Whole Slide Image Segmentation", "authors":[ "Matsuo, Shinnosuke", "Suehiro, Daiki", "Uchida, Seiichi", "Ito, Hiroaki", "Terada, Kazuhiro", "Yoshizawa, Akihiko", "Bise, Ryoma" ], "id":"Conference", "arxiv_id":"2405.09041", "GitHub":[ "https:\/\/github.com\/matsuo-shinnosuke\/LPLP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":234 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2251_paper.pdf", "bibtext":"@InProceedings{ Woo_Feature_MICCAI2024,\n author = { Woodland, McKell and Castelo, Austin and Al Taie, Mais and Albuquerque Marques Silva, Jessica and Eltaher, Mohamed and Mohn, Frank and Shieh, Alexander and Kundu, Suprateek and Yung, Joshua P. and Patel, Ankit B. and Brock, Kristy K. },\n title = { { Feature Extraction for Generative Medical Imaging Evaluation: New Evidence Against an Evolving Trend } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Fr\u00e9chet Inception Distance (FID) is a widely used metric for assessing synthetic image quality. It relies on an ImageNet-based feature extractor, making its applicability to medical imaging unclear. A recent trend is to adapt FID to medical imaging through feature extractors trained on medical images. Our study challenges this practice by demonstrating that ImageNet-based extractors are more consistent and aligned with human judgment than their RadImageNet counterparts. We evaluated sixteen StyleGAN2 networks across four medical imaging modalities and four data augmentation techniques with Fr\u00e9chet distances (FDs) computed using eleven ImageNet or RadImageNet-trained feature extractors. Comparison with human judgment via visual Turing tests revealed that ImageNet-based extractors produced rankings consistent with human judgment, with the FD derived from the ImageNet-trained SwAV extractor significantly correlating with expert evaluations. In contrast, RadImageNet-based rankings were volatile and inconsistent with human judgment. Our findings challenge prevailing assumptions, providing novel evidence that medical image-trained feature extractors do not inherently improve FDs and can even compromise their reliability. Our code is available at https:\/\/github.com\/mckellwoodland\/fid-med-eval.", "title":"Feature Extraction for Generative Medical Imaging Evaluation: New Evidence Against an Evolving Trend", "authors":[ "Woodland, McKell", "Castelo, Austin", "Al Taie, Mais", "Albuquerque Marques Silva, Jessica", "Eltaher, Mohamed", "Mohn, Frank", "Shieh, Alexander", "Kundu, Suprateek", "Yung, Joshua P.", "Patel, Ankit B.", "Brock, Kristy K." 
], "id":"Conference", "arxiv_id":"2311.13717", "GitHub":[ "https:\/\/github.com\/mckellwoodland\/fid-med-eval" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":235 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2483_paper.pdf", "bibtext":"@InProceedings{ Lee_Referencefree_MICCAI2024,\n author = { Lee, Kyungryun and Jeong, Won-Ki },\n title = { { Reference-free Axial Super-resolution of 3D Microscopy Images using Implicit Neural Representation with a 2D Diffusion Prior } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Analysis and visualization of 3D microscopy images pose\nchallenges due to anisotropic axial resolution, demanding volumetric super-resolution along the axial direction. While training a learning-based 3D super-resolution model seems to be a straightforward solution, it requires ground truth isotropic volumes and suffers from the curse of dimensionality. Therefore, existing methods utilize 2D neural networks to reconstruct each axial slice, eventually piecing together the entire volume. However, reconstructing each slice in the pixel domain fails to\ngive consistent reconstruction in all directions leading to misalignment artifacts. In this work, we present a reconstruction framework based on implicit neural representation (INR), which allows 3D coherency even when optimized by independent axial slices in a batch-wise manner. Our method optimizes a continuous volumetric representation from lowresolution axial slices, using a 2D diffusion prior trained on high-resolution lateral slices without requiring isotropic volumes. Through experiments on real and synthetic anisotropic microscopy images, we demonstrate that our method surpasses other state-of-the-art reconstruction methods.", "title":"Reference-free Axial Super-resolution of 3D Microscopy Images using Implicit Neural Representation with a 2D Diffusion Prior", "authors":[ "Lee, Kyungryun", "Jeong, Won-Ki" ], "id":"Conference", "arxiv_id":"2408.08616", "GitHub":[ "https:\/\/github.com\/hvcl\/INR-diffusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":236 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1577_paper.pdf", "bibtext":"@InProceedings{ Wu_FACMIC_MICCAI2024,\n author = { Wu, Yihang and Desrosiers, Christian and Chaddad, Ahmad },\n title = { { FACMIC: Federated Adaptative CLIP Model for Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Federated learning (FL) has emerged as a promising approach to medical image analysis that allows deep model training using decentralized data while ensuring data privacy. However, in the field of FL, communication cost plays a critical role in evaluating the performance of the model. 
Thus, transferring vision foundation models can be particularly challenging due to the significant resource costs involved. In this paper, we introduce a federated adaptive Contrastive Language Image Pretraining (CLIP) model designed for classification tasks. We employ a light-weight and efficient feature attention module for CLIP that selects suitable features for each client\u2019s data. Additionally, we propose a domain adaptation technique to reduce differences in data distribution between clients.\nExperimental results on four publicly available datasets demonstrate the superior performance of FACMIC in dealing with real-world and multisource medical imaging data. Our codes are available at \\url{https:\/\/github.com\/AIPMLab\/FACMIC}.", "title":"FACMIC: Federated Adaptative CLIP Model for Medical Image Classification", "authors":[ "Wu, Yihang", "Desrosiers, Christian", "Chaddad, Ahmad" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/AIPMLab\/FACMIC" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":237 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1889_paper.pdf", "bibtext":"@InProceedings{ Ama_Goalconditioned_MICCAI2024,\n author = { Amadou, Abdoul Aziz and Singh, Vivek and Ghesu, Florin C. and Kim, Young-Ho and Stanciulescu, Laura and Sai, Harshitha P. and Sharma, Puneet and Young, Alistair and Rajani, Ronak and Rhode, Kawal },\n title = { { Goal-conditioned reinforcement learning for ultrasound navigation guidance } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Transesophageal echocardiography (TEE) plays a pivotal role in cardiology for diagnostic and interventional procedures. However, using it effectively requires extensive training due to the intricate nature of image acquisition and interpretation. To enhance the efficiency of novice sonographers and reduce variability in scan acquisitions, we propose a novel ultrasound (US) navigation assistance method based on contrastive learning as goal-conditioned reinforcement learning (GCRL). We augment the previous framework using a novel contrastive patient batching method (CPB) and a data-augmented contrastive loss, both of which we demonstrate are essential to ensure generalization to anatomical variations across patients. The proposed framework enables navigation to both standard diagnostic as well as intricate interventional views with a single model. Our method was developed with a large dataset of 789 patients and obtained an average error of 6.56 mm in position and 9.36 degrees in angle on a testing dataset of 140 patients, which is competitive or superior to models trained on individual views. Furthermore, we quantitatively validate our method\u2019s ability to navigate to interventional views such as the Left Atrial Appendage (LAA) view used in LAA closure. 
Our approach holds promise in providing valuable guidance during transesophageal ultrasound examinations, contributing to the advancement of skill acquisition for cardiac ultrasound practitioners.", "title":"Goal-conditioned reinforcement learning for ultrasound navigation guidance", "authors":[ "Amadou, Abdoul Aziz", "Singh, Vivek", "Ghesu, Florin C.", "Kim, Young-Ho", "Stanciulescu, Laura", "Sai, Harshitha P.", "Sharma, Puneet", "Young, Alistair", "Rajani, Ronak", "Rhode, Kawal" ], "id":"Conference", "arxiv_id":"2405.01409", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":238 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1536_paper.pdf", "bibtext":"@InProceedings{ Shi_MoRA_MICCAI2024,\n author = { Shi, Zhiyi and Kim, Junsik and Li, Wanhua and Li, Yicong and Pfister, Hanspeter },\n title = { { MoRA: LoRA Guided Multi-Modal Disease Diagnosis with Missing Modality } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multi-modal pre-trained models efficiently extract and fuse features from different modalities with low memory requirements for fine-tuning. Despite this efficiency, their application in disease diagnosis is under-explored. A significant challenge is the frequent occurrence of missing modalities, which impairs performance. Additionally, fine-tuning the entire pre-trained model demands substantial computational resources. To address these issues, we introduce Modality-aware Low-Rank Adaptation (MoRA), a computationally efficient method. MoRA projects each input to a low intrinsic dimension but uses different modality-aware up-projections for modality-specific adaptation in cases of missing modalities. Practically, MoRA integrates into the first block of the model, significantly improving performance when a modality is missing. It requires minimal computational resources, with less than 1.6\\% of the trainable parameters needed compared to training the entire model. Experimental results show that MoRA outperforms existing techniques in disease diagnosis, demonstrating superior performance, robustness, and training efficiency. 
The code link is: https:\/\/github.com\/zhiyiscs\/MoRA.", "title":"MoRA: LoRA Guided Multi-Modal Disease Diagnosis with Missing Modality", "authors":[ "Shi, Zhiyi", "Kim, Junsik", "Li, Wanhua", "Li, Yicong", "Pfister, Hanspeter" ], "id":"Conference", "arxiv_id":"2408.09064", "GitHub":[ "https:\/\/github.com\/zhiyiscs\/MoRA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":239 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0483_paper.pdf", "bibtext":"@InProceedings{ Tme_Deep_MICCAI2024,\n author = { Tmenova, Oleksandra and Velikova, Yordanka and Saleh, Mahdi and Navab, Nassir },\n title = { { Deep Spectral Methods for Unsupervised Ultrasound Image Interpretation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Ultrasound imaging is challenging to interpret due to non-uniform intensities, low contrast, and inherent artifacts, necessitating extensive training for non-specialists. Advanced representation with clear tissue structure separation could greatly assist clinicians in mapping underlying anatomy and distinguishing between tissue layers. Decomposing an image into semantically meaningful segments is mainly achieved using supervised segmentation algorithms. Unsupervised methods are beneficial, as acquiring large labeled datasets is difficult and costly, but despite their advantages, they still need to be explored in ultrasound. This paper proposes a novel unsupervised deep learning strategy tailored to ultrasound to obtain easily interpretable tissue separations. We integrate key concepts from unsupervised deep spectral methods, which combine spectral graph theory with deep learning methods. We utilize self-supervised transformer features for spectral clustering to generate meaningful segments based on ultrasound-specific metrics and shape and positional priors, ensuring semantic consistency across the dataset. We evaluate our unsupervised deep learning strategy on three ultrasound datasets, showcasing qualitative results across anatomical contexts without label requirements. 
We also conduct a comparative analysis against other clustering algorithms to demonstrate superior segmentation performance, boundary preservation, and label consistency.", "title":"Deep Spectral Methods for Unsupervised Ultrasound Image Interpretation", "authors":[ "Tmenova, Oleksandra", "Velikova, Yordanka", "Saleh, Mahdi", "Navab, Nassir" ], "id":"Conference", "arxiv_id":"2408.02043", "GitHub":[ "https:\/\/github.com\/alexaatm\/UnsupervisedSegmentor4Ultrasound" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":240 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1262_paper.pdf", "bibtext":"@InProceedings{ Wu_Noise_MICCAI2024,\n author = { Wu, Chongwei and Zeng, Xiaoyu and Wang, Hao and Zhang, Xu and Fang, Wei and Li, Qiang and Wang, Zhiwei },\n title = { { Noise Removed Inconsistency Activation Map for Unsupervised Registration of Brain Tumor MRI between Pre-operative and Follow-up Phases } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Structure inconsistency is the key challenge in registration of brain MRI between pre-operative and follow-up phases, which misguides the objective of image similarity maximization, and thus degrades the performance significantly. The current solutions rely on bidirectional registration to find the mismatched deformation fields as the inconsistent areas, and use them to filter out the unreliable similarity measurements. However, this is sensitive to the accumulated registration errors, and thus yields inaccurate inconsistent areas. In this paper, we provide a more efficient and accurate way, by letting the registration model itself to `speak out\u2019 a Noise Removed Inconsistency Activation Map (NR-IAM) as the indicator of structure inconsistencies. We first obtain an IAM by use of the gradient-weighted feature maps but adopting an inverse direction. With this manner only, the resulting inconsistency map often occurs false highlights near some common structures like venous sinus. Therefore, we further introduce a statistical approach to remove the common erroneous activations in IAM to obtain NR-IAM. The experimental results on both public and private datasets demonstrate that by use of our proposed NR-IAM to guide the optimization, the registration performance can be significantly boosted, and is superior over that relying on the bidirectional registration by decreasing mean registration error by 5\\% and 4\\% in near tumor and far from tumor regions, respectively. 
Codes are available at https:\/\/github.com\/chongweiwu\/NR-IAM.", "title":"Noise Removed Inconsistency Activation Map for Unsupervised Registration of Brain Tumor MRI between Pre-operative and Follow-up Phases", "authors":[ "Wu, Chongwei", "Zeng, Xiaoyu", "Wang, Hao", "Zhang, Xu", "Fang, Wei", "Li, Qiang", "Wang, Zhiwei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/chongweiwu\/NR-IAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":241 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2168_paper.pdf", "bibtext":"@InProceedings{ Xio_MoME_MICCAI2024,\n author = { Xiong, Conghao and Chen, Hao and Zheng, Hao and Wei, Dong and Zheng, Yefeng and Sung, Joseph J. Y. and King, Irwin },\n title = { { MoME: Mixture of Multimodal Experts for Cancer Survival Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Survival prediction requires integrating Whole Slide Images (WSIs) and genomics, a task complicated by significant heterogeneity and complex inter- and intra-modal interactions between modalities. Previous methods used co-attention, fusing features only once after separate encoding, which is insufficient to model such a complex task due to modality heterogeneity. To this end, we propose a Biased Progressive Encoding (BPE) paradigm, performing encoding and fusion simultaneously. This paradigm uses one modality as a reference when encoding the other, fostering deep fusion of the modalities through multiple iterations, progressively reducing the cross-modal disparities and facilitating complementary interactions. Besides, survival prediction involves biomarkers from WSIs, genomics, and their integrative analysis. Key biomarkers may exist in different modalities under individual variations, necessitating the model flexibility. Hence, we further propose a Mixture of Multimodal Experts layer to dynamically select tailored experts in each stage of the BPE paradigm. Experts incorporate reference information from another modality to varying degrees, enabling a balanced or biased focus on different modalities during the encoding process. The experimental results demonstrate the superior performance of our method on various datasets, including TCGA-BLCA, TCGA-UCEC and TCGA-LUAD. Codes are available at https:\/\/github.com\/BearCleverProud\/MoME.", "title":"MoME: Mixture of Multimodal Experts for Cancer Survival Prediction", "authors":[ "Xiong, Conghao", "Chen, Hao", "Zheng, Hao", "Wei, Dong", "Zheng, Yefeng", "Sung, Joseph J. 
Y.", "King, Irwin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/BearCleverProud\/MoME" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":242 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4012_paper.pdf", "bibtext":"@InProceedings{ Qia_Medical_MICCAI2024,\n author = { Qiao, Qiang and Wang, Wenyu and Qu, Meixia and Su, Kun and Jiang, Bin and Guo, Qiang },\n title = { { Medical Image Segmentation via Single-Source Domain Generalization with Random Amplitude Spectrum Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"The field of medical image segmentation is challenged by domain generalization (DG) due to domain shifts in clinical datasets. The DG challenge is exacerbated by the scarcity of medical data and privacy concerns. Traditional single-source domain generalization (SSDG) methods primarily rely on stacking data augmentation techniques to minimize domain discrepancies. In this paper, we propose Random Amplitude Spectrum Synthesis (RASS) as a training augmentation for medical images. RASS enhances model generalization by simulating distribution changes from a frequency perspective. This strategy introduces variability by applying amplitude-dependent perturbations to ensure broad coverage of potential domain variations. Furthermore, we propose random mask shuffle and reconstruction components, which can enhance the ability of the backbone to process structural information and increase resilience intra- and cross-domain changes. The proposed Random Amplitude Spectrum Synthesis for Single-Source Domain Generalization (RAS^4DG) is validated on 3D fetal brain images and 2D fundus photography, and achieves an improved DG segmentation performance compared to other SSDG models. The source code is available at: https:\/\/github.com\/qintianjian-lab\/RAS4DG.", "title":"Medical Image Segmentation via Single-Source Domain Generalization with Random Amplitude Spectrum Synthesis", "authors":[ "Qiao, Qiang", "Wang, Wenyu", "Qu, Meixia", "Su, Kun", "Jiang, Bin", "Guo, Qiang" ], "id":"Conference", "arxiv_id":"2409.04768", "GitHub":[ "https:\/\/github.com\/qintianjian-lab\/ras4dg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":243 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3757_paper.pdf", "bibtext":"@InProceedings{ Gau_Immuneguided_MICCAI2024,\n author = { Gautam, Tanishq and Gonzalez, Karina P. and Salvatierra, Maria E. and Serrano, Alejandra and Chen, Pingjun and Pan, Xiaoxi and Shokrollahi, Yasin and Ranjbar, Sara and Rodriguez, Leticia and Team, Patient Mosaic and Solis-Soto, Luisa and Yuan, Yinyin and Castillo, Simon P. 
},\n title = { { Immune-guided AI for Reproducible Regions of Interest Selection in Multiplex Immunofluorescence Pathology Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Selecting regions of interest (ROIs) in whole-slide histology images (WSIs) is a crucial step for spatial molecular profiling. As a general practice, pathologists manually select ROIs within each WSI based on morphological tumor markers to guide spatial profiling, which can be inconsistent and subjective. To enhance reproducibility and avoid inter-pathologist variability, we introduce a novel immune-guided end-to-end pipeline to automate the ROI selection in multiplex immunofluorescence (mIF) WSIs stained with three cell markers (Syto13, CD45, PanCK). First, we estimate immune infiltration (CD45+ expression) scores at the grid level in each WSI. Then, we incorporate the Pathology Language and Image Pre-Training (PLIP) foundational model to extract features from each grid and further select a subset of grids representative of the whole slide that comparatively matches pathologists\u2019 assessment. Further, we implement state-of-the-art detection models for ROI detection in each grid, incorporating learning from pathologists\u2019 ROI selection. Our study shows a significant correlation between our automated method and pathologists\u2019 ROI selection across five different types of carcinomas, as evidenced by a significant Spearman\u2019s correlation coefficient (> 0.785, p < 0.001), substantial inter-rater agreement (Cohen\u2019s kappa > 0.671), and the ability to replicate the ROI selection made by independent pathologists with excellent average performance (0.968 precision and 0.991 mean average precision at a 0.5 intersection-over-union). By minimizing manual intervention, our solution provides a flexible framework that potentially adapts to various markers, thus enhancing the efficiency and accuracy of digital pathology analyses.", "title":"Immune-guided AI for Reproducible Regions of Interest Selection in Multiplex Immunofluorescence Pathology Imaging", "authors":[ "Gautam, Tanishq", "Gonzalez, Karina P.", "Salvatierra, Maria E.", "Serrano, Alejandra", "Chen, Pingjun", "Pan, Xiaoxi", "Shokrollahi, Yasin", "Ranjbar, Sara", "Rodriguez, Leticia", "Team, Patient Mosaic", "Solis-Soto, Luisa", "Yuan, Yinyin", "Castillo, Simon P." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":244 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0821_paper.pdf", "bibtext":"@InProceedings{ Son_SDCL_MICCAI2024,\n author = { Song, Bentao and Wang, Qingfeng },\n title = { { SDCL: Students Discrepancy-Informed Correction Learning for Semi-supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised medical image segmentation (SSMIS) has\nbeen demonstrated the potential to mitigate the issue of limited medical labeled data. However, confirmation and cognitive biases may affect the prevalent teacher-student based SSMIS methods due to erroneous pseudo-labels. To tackle this challenge, we improve the mean teacher approach and propose the Students Discrepancy-Informed Correction Learning (SDCL) framework that includes two students and one nontrainable teacher, which utilizes the segmentation difference between the\ntwo students to guide the self-correcting learning. The essence of SDCL is to identify the areas of segmentation discrepancy as the potential bias areas, and then encourage the model to review the correct cognition and rectify their own biases in these areas. To facilitate the bias correction learning with continuous review and rectification, two correction loss functions are employed to minimize the correct segmentation voxel\ndistance and maximize the erroneous segmentation voxel entropy. We conducted experiments on three public medical image datasets: two 3D datasets (CT and MRI) and one 2D dataset (MRI). The results show that our SDCL surpasses the current State-of-the-Art (SOTA) methods by 2.57%, 3.04%, and 2.34% in the Dice score on the Pancreas, LA, and ACDC datasets, respectively. 
In addition, the accuracy of our method is\nvery close to the fully supervised method on the ACDC dataset, and even exceeds the fully supervised method on the Pancreas and LA dataset.(Code available at https:\/\/github.com\/pascalcpp\/SDCL).", "title":"SDCL: Students Discrepancy-Informed Correction Learning for Semi-supervised Medical Image Segmentation", "authors":[ "Song, Bentao", "Wang, Qingfeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/pascalcpp\/SDCL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":245 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0065_paper.pdf", "bibtext":"@InProceedings{ Cho_Embracing_MICCAI2024,\n author = { Chou, Yu-Cheng and Zhou, Zongwei and Yuille, Alan },\n title = { { Embracing Massive Medical Data } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"As massive medical data become available with an increasing number of scans, expanding classes, and varying sources, prevalent training paradigms\u2013where AI is trained with multiple passes over fixed, finite datasets\u2013face significant challenges. First, training AI all at once on such massive data is impractical as new scans\/sources\/classes continuously arrive. Second, training AI continuously on new scans\/sources\/classes can lead to catastrophic forgetting, where AI forgets old data as it learns new data, and vice versa. To address these two challenges, we propose an online learning method that enables training AI from massive medical data. Instead of repeatedly training AI on randomly selected data samples, our method identifies the most significant samples for the current AI model based on their data uniqueness and prediction uncertainty, then trains the AI on these selective data samples. 
Compared with prevalent training paradigms, our method not only improves data efficiency by enabling training on continual data streams, but also mitigates catastrophic forgetting by selectively training AI on significant data samples that might otherwise be forgotten, outperforming by 15% in Dice score for multi-organ and tumor segmentation.", "title":"Embracing Massive Medical Data", "authors":[ "Chou, Yu-Cheng", "Zhou, Zongwei", "Yuille, Alan" ], "id":"Conference", "arxiv_id":"2407.04687", "GitHub":[ "https:\/\/github.com\/MrGiovanni\/OnlineLearning" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":246 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1854_paper.pdf", "bibtext":"@InProceedings{ Zhu_Lifelong_MICCAI2024,\n author = { Zhu, Xinyu and Jiang, Zhiguo and Wu, Kun and Shi, Jun and Zheng, Yushan },\n title = { { Lifelong Histopathology Whole Slide Image Retrieval via Distance Consistency Rehearsal } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Content-based histopathological image retrieval (CBHIR) has gained attention in recent years, offering the capability to return histopathology images that are content-wise similar to the query one from an established database. However, in clinical practice, the continuously expanding size of WSI databases limits the practical application of the current CBHIR methods. In this paper, we propose a Lifelong Whole Slide Retrieval (LWSR) framework to address the challenges of catastrophic forgetting by progressive model updating on continuously growing retrieval database. Our framework aims to achieve the balance between stability and plasticity during continuous learning. To preserve system plasticity, we utilize local memory bank with reservoir sampling method to save instances, which can comprehensively encompass the feature spaces of both old and new tasks. Furthermore, A distance consistency rehearsal (DCR) module is designed to ensure the retrieval queue\u2019s consistency for previous tasks, which is regarded as stability within a lifelong CBHIR system. We evaluated the proposed method on four public WSI datasets from TCGA projects. 
The experimental results have demonstrated the proposed method is effective and is superior to the state-of-the-art methods.", "title":"Lifelong Histopathology Whole Slide Image Retrieval via Distance Consistency Rehearsal", "authors":[ "Zhu, Xinyu", "Jiang, Zhiguo", "Wu, Kun", "Shi, Jun", "Zheng, Yushan" ], "id":"Conference", "arxiv_id":"2407.08153", "GitHub":[ "https:\/\/github.com\/OliverZXY\/LWSR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":247 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1767_paper.pdf", "bibtext":"@InProceedings{ Zha_Feature_MICCAI2024,\n author = { Zhao, Yimin and Gu, Jin },\n title = { { Feature Fusion Based on Mutual-Cross-Attention Mechanism for EEG Emotion Recognition } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"An objective and accurate emotion diagnostic reference is vital to psychologists, especially when dealing with patients who are difficult to communicate with for pathological reasons. Nevertheless, current systems based on Electroencephalography (EEG) data utilized for sentiment discrimination have some problems, including excessive model complexity, mediocre accuracy, and limited interpretability. Consequently, we propose a novel and effective feature fusion mechanism named Mutual-Cross-Attention (MCA). Combining with a specially customized 3D Convolutional Neural Network (3D-CNN), this purely mathematical mechanism adeptly discovers the complementary relationship between time-domain and frequency-domain features in EEG data. Furthermore, the new designed Channel-PSD-DE 3D feature also contributes to the high performance. The proposed method eventually achieves 99.49% (valence) and 99.30% (arousal) accuracy on DEAP dataset. Our code and data is open-sourced at https:\/\/github.com\/ztony0712\/MCA.", "title":"Feature Fusion Based on Mutual-Cross-Attention Mechanism for EEG Emotion Recognition", "authors":[ "Zhao, Yimin", "Gu, Jin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ztony0712\/MCA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":248 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2017_paper.pdf", "bibtext":"@InProceedings{ He_Algebraic_MICCAI2024,\n author = { He, Jin and Liu, Weizhou and Zhao, Shifeng and Tian, Yun and Wang, Shuo },\n title = { { Algebraic Sphere Surface Fitting for Accurate and Efficient Mesh Reconstruction from Cine CMR Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate 3D modeling of the ventricles through cine cardiovascular magnetic resonance (CMR) imaging benefits precise clinical assessment of cardiac morphology and motion. 
However, the existing short-axis stacks exhibit low spatial resolution in the inter-slice orientation compared to the intra-slice direction, resulting in a sparse representation of the realistic heart. The anisotropic short-axis images pose challenges in directly reconstructing meshes from them. In this work, we propose a surface fitting approach based on the algebraic sphere, which serves as a previous step for various mesh-based applications, to reconstruct a natural ventricular shape from the segmented wireframe-type point cloud. Considering the sparse and layered nature of the point clouds, we first estimate the normals of the point cloud based on dynamic programming and neighborhood selection, followed by fitting a point set surface using a non-compact kernel adapted by layers. Finally, an implicit scalar field representing the signed distance between the query point and the projection point is obtained, and the manifold mesh is extracted by meshing zero iso-surface. Experimental results on two publicly available datasets demonstrate that the proposed framework can accurately and effectively reconstruct ventricular mesh from a single image with better cross-domain generalizability.", "title":"Algebraic Sphere Surface Fitting for Accurate and Efficient Mesh Reconstruction from Cine CMR Images", "authors":[ "He, Jin", "Liu, Weizhou", "Zhao, Shifeng", "Tian, Yun", "Wang, Shuo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/hejin9\/algebraic-sphere-surface-fitting" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":249 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0936_paper.pdf", "bibtext":"@InProceedings{ Han_NonAdversarial_MICCAI2024,\n author = { Han, Luyi and Tan, Tao and Zhang, Tianyu and Wang, Xin and Gao, Yuan and Lu, Chunyao and Liang, Xinglong and Dou, Haoran and Huang, Yunzhi and Mann, Ritse },\n title = { { Non-Adversarial Learning: Vector-Quantized Common Latent Space for Multi-Sequence MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Adversarial learning helps generative models translate MRI from source to target sequence when lacking paired samples. However, implementing MRI synthesis with adversarial learning in clinical settings is challenging due to training instability and mode collapse. To address this issue, we leverage intermediate sequences to estimate the common latent space among multi-sequence MRI, enabling the reconstruction of distinct sequences from the common latent space. We propose a generative model that compresses discrete representations of each sequence to estimate the Gaussian distribution of vector-quantized common (VQC) latent space between multiple sequences. Moreover, we improve the latent space consistency with contrastive learning and increase model stability by domain augmentation. 
Experiments using BraTS2021 dataset show that our non-adversarial model outperforms other GAN-based methods, and VQC latent space aids our model to achieve (1) anti-interference ability, which can eliminate the effects of noise, bias fields, and artifacts, and (2) solid semantic representation ability, with the potential of one-shot segmentation. Our code is publicly available.", "title":"Non-Adversarial Learning: Vector-Quantized Common Latent Space for Multi-Sequence MRI", "authors":[ "Han, Luyi", "Tan, Tao", "Zhang, Tianyu", "Wang, Xin", "Gao, Yuan", "Lu, Chunyao", "Liang, Xinglong", "Dou, Haoran", "Huang, Yunzhi", "Mann, Ritse" ], "id":"Conference", "arxiv_id":"2407.02911", "GitHub":[ "https:\/\/github.com\/fiy2W\/mri\\_seq2seq" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":250 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1788_paper.pdf", "bibtext":"@InProceedings{ Dor_PatientSpecific_MICCAI2024,\n author = { Dorent, Reuben and Torio, Erickson and Haouchine, Nazim and Galvin, Colin and Frisken, Sarah and Golby, Alexandra and Kapur, Tina and Wells III, William M. },\n title = { { Patient-Specific Real-Time Segmentation in Trackerless Brain Ultrasound } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Intraoperative ultrasound (iUS) imaging has the potential to improve surgical outcomes in brain surgery. However, its interpretation is challenging, even for expert neurosurgeons. In this work, we designed the first patient-specific framework that performs brain tumor segmentation in trackerless iUS. To disambiguate ultrasound imaging and adapt to the neurosurgeon\u2019s surgical objective, a patient-specific real-time network is trained using synthetic ultrasound data generated by simulating virtual iUS sweep acquisitions in pre-operative MR data. Extensive experiments performed in real ultrasound data demonstrate the effectiveness of the proposed approach, allowing for adapting to the surgeon\u2019s definition of surgical targets and outperforming non-patient-specific models, neurosurgeon experts, and high-end tracking systems. Our code is available at: \\url{https:\/\/github.com\/ReubenDo\/MHVAE-Seg}.", "title":"Patient-Specific Real-Time Segmentation in Trackerless Brain Ultrasound", "authors":[ "Dorent, Reuben", "Torio, Erickson", "Haouchine, Nazim", "Galvin, Colin", "Frisken, Sarah", "Golby, Alexandra", "Kapur, Tina", "Wells III, William M." 
], "id":"Conference", "arxiv_id":"2405.09959", "GitHub":[ "https:\/\/github.com\/ReubenDo\/MHVAE-Seg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":251 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1511_paper.pdf", "bibtext":"@InProceedings{ Hua_Noise_MICCAI2024,\n author = { Huang, Shoujin and Luo, Guanxiong and Wang, Xi and Chen, Ziran and Wang, Yuwan and Yang, Huaishui and Heng, Pheng-Ann and Zhang, Lingyan and Lyu, Mengye },\n title = { { Noise Level Adaptive Diffusion Model for Robust Reconstruction of Accelerated MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"In general, diffusion model-based MRI reconstruction methods incrementally remove artificially added noise while imposing data consistency to reconstruct the underlying images. However, real-world MRI acquisitions already contain inherent noise due to thermal fluctuations. This phenomenon is particularly notable when using ultra-fast, high-resolution imaging sequences for advanced research, or using low-field systems favored by low- and middle-income countries. These common scenarios can lead to sub-optimal performance or complete failure of existing diffusion model-based reconstruction techniques. Specifically, as the artificially added noise is gradually removed, the inherent MRI noise becomes increasingly pronounced, making the actual noise level inconsistent with the predefined denoising schedule and consequently inaccurate image reconstruction. To tackle this problem, we propose a posterior sampling strategy with a novel NoIse Level Adaptive Data Consistency (Nila-DC) operation. Extensive experiments are conducted on two public datasets and an in-house clinical dataset with field strength ranging from 0.3T to 3T, showing that our method surpasses the state-of-the-art MRI reconstruction methods, and is highly robust against various noise levels. 
The code for Nila is available at \\url{https:\/\/github.com\/Solor-pikachu\/Nila}.", "title":"Noise Level Adaptive Diffusion Model for Robust Reconstruction of Accelerated MRI", "authors":[ "Huang, Shoujin", "Luo, Guanxiong", "Wang, Xi", "Chen, Ziran", "Wang, Yuwan", "Yang, Huaishui", "Heng, Pheng-Ann", "Zhang, Lingyan", "Lyu, Mengye" ], "id":"Conference", "arxiv_id":"2403.05245", "GitHub":[ "https:\/\/github.com\/Solor-pikachu\/Nila" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":252 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1468_paper.pdf", "bibtext":"@InProceedings{ He_Embryo_MICCAI2024,\n author = { He, Chloe and Karpavi\u010di\u016bt\u0117, Neringa and Hariharan, Rishabh and Jacques, C\u00e9line and Chambost, J\u00e9r\u00f4me and Malmsten, Jonas and Zaninovic, Nikica and Wouters, Koen and Fr\u00e9our, Thomas and Hickman, Cristina and Vasconcelos, Francisco },\n title = { { Embryo Graphs: Predicting Human Embryo Viability from 3D Morphology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Embryo selection is a critical step in the process of in-vitro fertilisation in which embryologists choose the most viable embryos for transfer into the uterus. In recent years, numerous works have used computer vision to perform embryo selection. However, many of these works have neglected the fact that the embryo is a 3D structure, instead opting to analyse embryo images captured at a single focal plane.\nIn this paper we present a method for the 3D reconstruction of cleavage-stage human embryos. Through a user study, we validate that our reconstructions align with expert assessments. Furthermore, we demonstrate the utility of our approach by generating graph representations that capture biologically relevant features of the embryos. In pilot experiments, we train a graph neural network on these representations and show that it outperforms existing methods in predicting live birth from euploid embryo transfers. Our findings suggest that incorporating 3D reconstruction and graph-based analysis can improve automated embryo selection.", "title":"Embryo Graphs: Predicting Human Embryo Viability from 3D Morphology", "authors":[ "He, Chloe", "Karpavi\u010di\u016bt\u0117, Neringa", "Hariharan, Rishabh", "Jacques, C\u00e9line", "Chambost, J\u00e9r\u00f4me", "Malmsten, Jonas", "Zaninovic, Nikica", "Wouters, Koen", "Fr\u00e9our, Thomas", "Hickman, Cristina", "Vasconcelos, Francisco" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/chlohe\/embryo-graphs" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":253 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1540_paper.pdf", "bibtext":"@InProceedings{ Hua_One_MICCAI2024,\n author = { Huang, Shiqi and Xu, Tingfa and Shen, Ziyi and Saeed, Shaheer Ullah and Yan, Wen and Barratt, Dean C. 
and Hu, Yipeng },\n title = { { One registration is worth two segmentations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"The goal of image registration is to establish spatial correspondence between two or more images, traditionally through dense displacement fields (DDFs) or parametric transformations (e.g., rigid, affine, and splines). Rethinking the existing paradigms of achieving alignment via spatial transformations, we uncover an alternative but more intuitive correspondence representation: a set of corresponding regions-of-interest (ROI) pairs, which we demonstrate to have sufficient representational capability as other correspondence representation methods. Further, it is neither necessary nor sufficient for these ROIs to hold specific anatomical or semantic significance. In turn, we formulate image registration as searching for the same set of corresponding ROIs from both moving and fixed images - in other words, two multi-class segmentation tasks on a pair of images. For a general-purpose and practical implementation, we integrate the segment anything model (SAM) into our proposed algorithms, resulting in a SAM-enabled registration (SAMReg) that does not require any training data, gradient-based fine-tuning or engineered prompts. We experimentally show that the proposed SAMReg is capable of segmenting and matching multiple ROI pairs, which establish sufficiently accurate correspondences, in three clinical applications of registering prostate MR, cardiac MR and abdominal CT images. Based on metrics including Dice and target registration errors on anatomical structures, the proposed registration outperforms both intensity-based iterative algorithms and DDF-predicting learning-based networks, even yielding competitive performance with weakly-supervised registration which requires fully-segmented training data.", "title":"One registration is worth two segmentations", "authors":[ "Huang, Shiqi", "Xu, Tingfa", "Shen, Ziyi", "Saeed, Shaheer Ullah", "Yan, Wen", "Barratt, Dean C.", "Hu, Yipeng" ], "id":"Conference", "arxiv_id":"2405.10879", "GitHub":[ "https:\/\/github.com\/sqhuang0103\/SAMReg.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":254 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2837_paper.pdf", "bibtext":"@InProceedings{ Li_DualModality_MICCAI2024,\n author = { Li, Rui and Ruan, Jingliang and Lu, Yao },\n title = { { Dual-Modality Watershed Fusion Network for Thyroid Nodule Classification of Dual-View CEUS Video } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Contrast-enhanced ultrasound (CEUS) allows real-time visualization of the vascular distribution within thyroid nodules, garnering significant attention in their intelligent diagnosis. 
Existing methods either focus on modifying models while neglecting the unique aspects of CEUS, or rely only on single-modality data while overlooking the complementary information contained in the dual-view CEUS data. To overcome these limitations, inspired by the CEUS thyroid imaging reporting and data system (TI-RADS), this paper proposes a new dual-modality watershed fusion network (DWFN) for diagnosing thyroid nodules using dual-view CEUS videos. Specifically, the method introduces the watershed analysis from the remote sensing field and combines it with the optical flow method to extract the enhancement direction feature mentioned in the CEUS TI-RADS. On this basis, the interpretable watershed 3D network (W3DN) is constructed by C3D to further extract the dynamic blood flow features contained in CEUS videos. Furthermore, to make more comprehensive use of clinical information, a dual-modality 2D and 3D combined network, DWFN, is constructed, which fuses the morphological features extracted from US images by InceptionResNetV2 and the dynamic blood flow features extracted from CEUS videos by W3DN, to classify thyroid nodules as benign or malignant. The effectiveness of the proposed DWFN method was evaluated using extensive experimental results on a collected dataset of dual-view CEUS videos for thyroid nodules, achieving an area under the receiver operating characteristic curve of 0.920, with accuracy, sensitivity, specificity, positive predictive value, negative predictive value, and F1 score of 0.858, 0.845, 0.872, 0.879, 0.837, and 0.861, respectively, outperforming other state-of-the-art methods.", "title":"Dual-Modality Watershed Fusion Network for Thyroid Nodule Classification of Dual-View CEUS Video", "authors":[ "Li, Rui", "Ruan, Jingliang", "Lu, Yao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":255 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1038_paper.pdf", "bibtext":"@InProceedings{ Xu_TeethDreamer_MICCAI2024,\n author = { Xu, Chenfan and Liu, Zhentao and Liu, Yuan and Dou, Yulong and Wu, Jiamin and Wang, Jiepeng and Wang, Minjiao and Shen, Dinggang and Cui, Zhiming },\n title = { { TeethDreamer: 3D Teeth Reconstruction from Five Intra-oral Photographs } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Orthodontic treatment usually requires regular face-to-face examinations to monitor dental conditions of the patients. When in-person diagnosis is not feasible, an alternative is to utilize five intra-oral photographs for remote dental monitoring. However, it lacks 3D information, and how to reconstruct 3D dental models from such sparse view photographs is a challenging problem. In this study, we propose a 3D teeth reconstruction framework, named TeethDreamer, aiming to restore the shape and position of the upper and lower teeth.
Given five intra-oral photographs, our approach first leverages a large diffusion model\u2019s prior knowledge to generate novel multi-view images with known poses to address sparse inputs and then reconstructs high-quality 3D teeth models by neural surface reconstruction. To ensure the 3D consistency across generated views, we integrate a 3D-aware feature attention mechanism in the reverse diffusion process. Moreover, a geometry-aware normal loss is incorporated into the teeth reconstruction process to enhance geometry accuracy. Extensive experiments demonstrate the superiority of our method over the current state of the art, giving the potential to monitor orthodontic treatment remotely. Our code is available at https:\/\/github.com\/ShanghaiTech-IMPACT\/TeethDreamer.", "title":"TeethDreamer: 3D Teeth Reconstruction from Five Intra-oral Photographs", "authors":[ "Xu, Chenfan", "Liu, Zhentao", "Liu, Yuan", "Dou, Yulong", "Wu, Jiamin", "Wang, Jiepeng", "Wang, Minjiao", "Shen, Dinggang", "Cui, Zhiming" ], "id":"Conference", "arxiv_id":"2407.11419", "GitHub":[ "https:\/\/github.com\/ShanghaiTech-IMPACT\/TeethDreamer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":256 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1471_paper.pdf", "bibtext":"@InProceedings{ Lon_MuGI_MICCAI2024,\n author = { Long, Lifan and Cui, Jiaqi and Zeng, Pinxian and Li, Yilun and Liu, Yuanjun and Wang, Yan },\n title = { { MuGI: Multi-Granularity Interactions of Heterogeneous Biomedical Data for Survival Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multimodal learning significantly benefits survival analysis for cancer, particularly through the integration of pathological images and genomic data. However, this presents new challenges in how to effectively integrate multi-modal biomedical data. Existing multi-modal survival prediction methods focus on mining the consistency or modality-specific information, failing to capture cross-modal interactions. To address this limitation, attention-based methods are proposed to enhance both the consistency and interactions. However, these methods inevitably introduce redundancy due to the overlapped information of multimodal data. In this paper, we propose a Multi-Granularity Interactions of heterogeneous biomedical data framework (MuGI) for precise survival prediction. MuGI consists of: a) a unimodal extractor for exploring preliminary modality-specific information, b) multi-modal optimal features capture (MOFC) for extracting ideal multi-modal representations, eliminating redundancy through decomposed multi-granularity information, as well as capturing consistency in a common space and enhancing modality-specific features in a private space, and c) multimodal hierarchical interaction for sufficient acquisition of cross-modal correlations and interactions through the cooperation of two Bilateral Cross Attention (BCA) modules. We conduct extensive experiments on three cancer cohorts from the Cancer Genome Atlas (TCGA) database.
The experimental results demonstrate that our MuGI achieves the state-of-the-art performance, outperforming both unimodal and multi-modal survival prediction methods.", "title":"MuGI: Multi-Granularity Interactions of Heterogeneous Biomedical Data for Survival Prediction", "authors":[ "Long, Lifan", "Cui, Jiaqi", "Zeng, Pinxian", "Li, Yilun", "Liu, Yuanjun", "Wang, Yan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":257 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0615_paper.pdf", "bibtext":"@InProceedings{ He_F2TNet_MICCAI2024,\n author = { He, Zhibin and Li, Wuyang and Jiang, Yu and Peng, Zhihao and Wang, Pengyu and Li, Xiang and Liu, Tianming and Han, Junwei and Zhang, Tuo and Yuan, Yixuan },\n title = { { F2TNet: FMRI to T1w MRI Knowledge Transfer Network for Brain Multi-phenotype Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Using brain imaging data to predict the non-neuroimaging phenotypes at the individual level is a fundamental goal of system neuroscience. Despite its significance, the high acquisition cost of functional Magnetic Resonance Imaging (fMRI) hampers its clinical translation in phenotype prediction, while the analysis based solely on cost-efficient T1-weighted (T1w) MRI yields inferior performance than fMRI. The reasons lie in that existing works ignore two significant challenges. 1) they neglect the knowledge transfer from fMRI to T1w MRI, failing to achieve effective prediction using cost-efficient T1w MRI. 2) They are limited to predicting a single phenotype and cannot capture the intrinsic dependence among various phenotypes, such as strength and endurance, preventing comprehensive and accurate clinical analysis. To tackle these issues, we propose an FMRI to T1w MRI knowledge transfer Network (F2TNet) to achieve cost-efficient and effective analysis on brain multi-phenotype, representing the first attempt in this field, which consists of a Phenotypes-guided Knowledge Transfer (PgKT) module and a modality-aware Multi-phenotype Prediction (MpP) module. Specifically, PgKT aligns brain nodes across modalities by solving a bipartite graph-matching problem, thereby achieving adaptive knowledge transfer from fMRI to T1w MRI through the guidance of multi-phenotype. Then, MpP enriches the phenotype codes with cross-modal complementary information and decomposes these codes to enable accurate multi-phenotype prediction. Experimental results demonstrate that the F2TNet significantly improves the prediction of brain multi-phenotype and outperforms state-of-the-art methods. 
The code is available at https:\/\/github.com\/CUHK-AIM-Group\/F2TNet.", "title":"F2TNet: FMRI to T1w MRI Knowledge Transfer Network for Brain Multi-phenotype Prediction", "authors":[ "He, Zhibin", "Li, Wuyang", "Jiang, Yu", "Peng, Zhihao", "Wang, Pengyu", "Li, Xiang", "Liu, Tianming", "Han, Junwei", "Zhang, Tuo", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/F2TNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":258 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1668_paper.pdf", "bibtext":"@InProceedings{ Lee_ADeep_MICCAI2024,\n author = { Lee, Sangyoon and Branzoli, Francesca and Nguyen, Thanh and Andronesi, Ovidiu and Lin, Alexander and Liserre, Roberto and Melkus, Gerd and Chen, Clark and Marja\u0144ska, Ma\u0142gorzata and Bolan, Patrick J. },\n title = { { A Deep Learning Approach for Placing Magnetic Resonance Spectroscopy Voxels in Brain Tumors } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Magnetic resonance spectroscopy (MRS) of brain tumors provides useful metabolic information for diagnosis, treatment response, and prognosis. Single-voxel MRS requires precise planning of the acquisition volume to produce a high-quality signal localized in the pathology of interest. Appropriate placement of the voxel in a brain tumor is determined by the size and morphology of the tumor, and is guided by MR imaging. Consistent placement of a voxel precisely within a tumor requires substantial expertise in neuroimaging interpretation and MRS methodology. The need for such expertise at the time of scan has contributed to low usage of MRS in clinical practice. In this study, we propose a deep learning method to perform voxel placements in brain tumors. The network is trained in a supervised fashion using a database of voxel placements performed by MRS experts. Our proposed method accurately replicates the voxel placements of experts in tumors with comparable tumor coverage, voxel volume, and voxel position to that of experts. This novel deep learning method can be easily applied without an extensive external validation as it only requires a segmented tumor mask as input.", "title":"A Deep Learning Approach for Placing Magnetic Resonance Spectroscopy Voxels in Brain Tumors", "authors":[ "Lee, Sangyoon", "Branzoli, Francesca", "Nguyen, Thanh", "Andronesi, Ovidiu", "Lin, Alexander", "Liserre, Roberto", "Melkus, Gerd", "Chen, Clark", "Marja\u0144ska, Ma\u0142gorzata", "Bolan, Patrick J." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":259 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2146_paper.pdf", "bibtext":"@InProceedings{ Pfa_NoNewDenoiser_MICCAI2024,\n author = { Pfaff, Laura and Wagner, Fabian and Vysotskaya, Nastassia and Thies, Mareike and Maul, Noah and Mei, Siyuan and Wuerfl, Tobias and Maier, Andreas },\n title = { { No-New-Denoiser: A Critical Analysis of Diffusion Models for Medical Image Denoising } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion models, originally introduced for image generation, have recently gained attention as a promising image denoising approach. In this work, we perform comprehensive experiments to investigate the challenges posed by diffusion models when applied to medical image denoising. In medical imaging, retaining the original image content, and refraining from adding or removing potentially pathologic details is of utmost importance. Through empirical analysis and discussions, we highlight the trade-off between image perception and distortion in the context of diffusion-based denoising.\nIn particular, we demonstrate that standard diffusion model sampling schemes yield a reduction in PSNR by up to 14 % compared to one-step denoising. Additionally, we provide visual evidence indicating that diffusion models, in combination with stochastic sampling, have a tendency to generate synthetic structures during the denoising process, consequently compromising the clinical validity of the denoised images. Our thorough investigation raises questions about the suitability of diffusion models for medical image denoising, underscoring potential limitations that warrant careful consideration for future applications.", "title":"No-New-Denoiser: A Critical Analysis of Diffusion Models for Medical Image Denoising", "authors":[ "Pfaff, Laura", "Wagner, Fabian", "Vysotskaya, Nastassia", "Thies, Mareike", "Maul, Noah", "Mei, Siyuan", "Wuerfl, Tobias", "Maier, Andreas" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":260 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2899_paper.pdf", "bibtext":"@InProceedings{ Kir_In_MICCAI2024,\n author = { Kirkegaard, Julius B. and Kutuzov, Nikolay P. 
and Netterstr\u00f8m, Rasmus and Darkner, Sune and Lauritzen, Martin and Lauze, Franc\u0327ois },\n title = { { In vivo deep learning estimation of diffusion coefficients of nanoparticles } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Understanding the transport of molecules in the brain \\emph{in vivo} is the key to learning how the brain regulates its metabolism, how brain pathologies develop, and how most of the developed brain-targeted drugs fail. Two-photon microscopy \u2013 the main tool for \\emph{in vivo} brain imaging \u2013 achieves sub-micrometer resolution and high image contrast when imaging cells, blood vessels, and other microscopic structures. However, images of small and fast-moving objects, e.g. nanoparticles, are ill-suited for analysis of transport with standard methods, e.g. super-localization, because of (i) low photon budgets resulting in noisy images; (ii) severe motion blur due to slow pixel-by-pixel image acquisition by two-photon microscopy; and (iii) high density of tracked objects, preventing their individual localization.\nHere, we developed a deep learning-based estimator of diffusion coefficients of nanoparticles directly from movies recorded with two-photon microscopy \\emph{in vivo}.\nWe\u2019ve benchmarked the method with synthetic data, model experimental data (nanoparticles in water), and \\emph{in vivo} data (nanoparticles in the brain). \nDeep Learning robustly estimates the diffusion coefficient of nanoparticles from movies with severe motion blur and movies with high nanoparticle densities, where, in contrast to the classic algorithms, the deep learning estimator\u2019s accuracy improves with increasing density. \nAs a result, the deep learning estimator facilitates the estimation of diffusion coefficients of nanoparticles in the brain \\emph{in vivo}, where the existing estimators fail.", "title":"In vivo deep learning estimation of diffusion coefficients of nanoparticles", "authors":[ "Kirkegaard, Julius B.", "Kutuzov, Nikolay P.", "Netterstr\u00f8m, Rasmus", "Darkner, Sune", "Lauritzen, Martin", "Lauze, Franc\u0327ois" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/kirkegaardlab\/2photodiffusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":261 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3582_paper.pdf", "bibtext":"@InProceedings{ Asg_Can_MICCAI2024,\n author = { Asgari-Targhi, Ameneh and Ungi, Tamas and Jin, Mike and Harrison, Nicholas and Duggan, Nicole and Duhaime, Erik P. and Goldsmith, Andrew and Kapur, Tina },\n title = { { Can Crowdsourced Annotations Improve AI-based Congestion Scoring For Bedside Lung Ultrasound? } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Lung ultrasound (LUS) has become an indispensable tool at the bedside in emergency and acute care settings, offering a fast and non-invasive way to assess pulmonary congestion.
Its portability and cost-effectiveness make it particularly valuable in resource-limited environments where quick decision-making is critical. Despite its advantages, the interpretation of B-line artifacts, which are key diagnostic indicators for conditions related to pulmonary congestion, can vary significantly among clinicians and even for the same clinician over time. This variability, coupled with the time pressure in acute settings, poses a challenge. To address this, our study introduces a new B-line segmentation method to calculate congestion scores from LUS images, aiming to standardize interpretations. We utilized a large dataset of 31,000 B-line annotations synthesized from over 550,000 crowdsourced opinions on LUS images of 299 patients to improve model training and accuracy. This approach has yielded a model with 94% accuracy in B-line counting (within a margin of 1) on a test set of 100 patients, demonstrating the potential of combining extensive data and crowdsourcing to refine lung ultrasound analysis for pulmonary congestion.", "title":"Can Crowdsourced Annotations Improve AI-based Congestion Scoring For Bedside Lung Ultrasound?", "authors":[ "Asgari-Targhi, Ameneh", "Ungi, Tamas", "Jin, Mike", "Harrison, Nicholas", "Duggan, Nicole", "Duhaime, Erik P.", "Goldsmith, Andrew", "Kapur, Tina" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":262 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2829_paper.pdf", "bibtext":"@InProceedings{ Wan_DPMNet_MICCAI2024,\n author = { Wang, Shudong and Zhao, Xue and Zhang, Yulin and Zhao, Yawu and Zhao, Zhiyuan and Ding, Hengtao and Chen, Tianxing and Qiao, Sibo },\n title = { { DPMNet: Dual-Path MLP-based Network for Aneurysm Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"MLP\u2212based networks, while being lighter than traditional convolution\u2212 and transformer\u2212based networks commonly used in medical image segmentation, often struggle with capturing local structures due to the limitations of fully\u2212connected (FC) layers, making them less ideal for such tasks. To address this issue, we design a Dual\u2212Path MLP\u2212based network (DPMNet) that includes a global and a local branch to understand the input images at different scales. In the two branches, we design an Axial Residual Connection MLP module (ARC\u2212MLP) to combine it with CNNs to capture the input image\u2019s global long-range dependencies and local visual structures simultaneously. Addi-\ntionally, we propose a Shifted Channel\u2212Mixer MLP block (SCM\u2212MLP) across width and height as a key component of ARC\u2212MLP to mix information from different spatial locations and channels. Extensive experiments demonstrate that the DPMNet significantly outperforms seven state\u2212of\u2212the\u2212art convolution\u2212 , transformer\u2212, and MLP\u2212based methods in both Dice and IoU scores, where the Dice and IoU scores for the IAS\u2212L dataset are 88.98% and 80.31% respectively. 
Code is available at https:\/\/github.com\/zx123868\/DPMNet.", "title":"DPMNet: Dual-Path MLP-based Network for Aneurysm Image Segmentation", "authors":[ "Wang, Shudong", "Zhao, Xue", "Zhang, Yulin", "Zhao, Yawu", "Zhao, Zhiyuan", "Ding, Hengtao", "Chen, Tianxing", "Qiao, Sibo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zx123868\/DPMNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":263 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0442_paper.pdf", "bibtext":"@InProceedings{ Mus_Analyzing_MICCAI2024,\n author = { Musa, Aminu and Ibrahim Adamu, Mariya and Kakudi, Habeebah Adamu and Hernandez, Monica and Lawal, Yusuf },\n title = { { Analyzing Cross-Population Domain Shift in Chest X-Ray Image Classification and Mitigating the Gap with Deep Supervised Domain Adaptation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image analysis powered by artificial intelligence (AI) is pivotal in healthcare diagnostics. However, the efficacy of machine learning models relies on their adaptability to diverse patient populations, presenting domain shift challenges. This study investigates domain shift in chest X-ray classification, focusing on cross-population variations, specifically in an African dataset. Disparities between source and target populations were measured by evaluating model performance. We propose supervised domain adaptation to mitigate this issue, leveraging labeled data in both domains for fine-tuning. Our experiments show significant improvements in model accuracy for chest X-ray classification in the African dataset.
This research underscores the importance of domain-aware model development in AI-driven healthcare, contributing to addressing domain-shift challenges in medical imaging.", "title":"Analyzing Cross-Population Domain Shift in Chest X-Ray Image Classification and Mitigating the Gap with Deep Supervised Domain Adaptation", "authors":[ "Musa, Aminu", "Ibrahim Adamu, Mariya", "Kakudi, Habeebah Adamu", "Hernandez, Monica", "Lawal, Yusuf" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":264 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4230_paper.pdf", "bibtext":"@InProceedings{ Gan_MedContext_MICCAI2024,\n author = { Gani, Hanan and Naseer, Muzammal and Khan, Fahad and Khan, Salman },\n title = { { MedContext: Learning Contextual Cues for Efficient Volumetric Medical Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep neural networks have significantly improved volumetric medical segmentation, but they generally require large-scale annotated data to achieve better performance, which can be expensive and prohibitive to obtain. To address this limitation, existing works typically perform transfer learning or design dedicated pretraining-finetuning stages to learn representative features. However, the mismatch between the source and target domain can make it challenging to learn optimal representation for volumetric data, while the multi-stage training demands higher compute as well as careful selection of stage-specific design choices. In contrast, we propose a universal training framework called MedContext that is architecture-agnostic and can be incorporated into any existing training framework for 3D medical segmentation. Our approach effectively learns self supervised contextual cues jointly with the supervised voxel segmentation task without requiring large-scale annotated volumetric medical data or dedicated pretraining-finetuning stages. The proposed approach induces contextual knowledge in the network by learning to reconstruct the missing organ or parts of an organ in the output segmentation space. The effectiveness of MedContext is validated across multiple 3D medical datasets and four state-of-the-art model architectures. 
Our approach demonstrates consistent gains in segmentation performance across datasets and architectures, even in few-shot scenarios.", "title":"MedContext: Learning Contextual Cues for Efficient Volumetric Medical Segmentation", "authors":[ "Gani, Hanan", "Naseer, Muzammal", "Khan, Fahad", "Khan, Salman" ], "id":"Conference", "arxiv_id":"2402.17725", "GitHub":[ "https:\/\/github.com\/hananshafi\/MedContext" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":265 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1178_paper.pdf", "bibtext":"@InProceedings{ Li_GMMCoRegNet_MICCAI2024,\n author = { Li, Zhenyu and Yu, Fan and Lu, Jie and Qian, Zhen },\n title = { { GMM-CoRegNet: A Multimodal Groupwise Registration Framework Based on Gaussian Mixture Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Within-subject multimodal groupwise registration aims to align a group of multimodal images into a common structural space. Existing groupwise registration methods often rely on intensity-based similarity measures, but can be computationally expensive for large sets of images. Some methods build statistical relationships between image intensities and anatomical structures, which may be misleading when the assumption of consistent intensity-class correspondences does not hold. Additionally, these methods can be unstable in batch group registration when the number of anatomical structures varies across different image groups. To tackle these issues, we propose GMM-CoRegNet, a weakly supervised deep learning framework for multimodal groupwise image registration. A prior Gaussian Mixture Model (GMM) consolidating the image intensities and anatomical structures is constructed using the label of the reference image; we then derive a novel similarity measure for groupwise registration based on the GMM and iteratively optimize the GMM throughout the training process. Notably, GMM-CoRegNet can register an arbitrary number of images simultaneously to a reference image, needing only the label of the reference image. We compared GMM-CoRegNet with state-of-the-art groupwise registration methods on two carotid datasets and the public BrainWeb dataset, demonstrating its superior registration performance even for the registration scenario of inconsistent intensity-class mappings.", "title":"GMM-CoRegNet: A Multimodal Groupwise Registration Framework Based on Gaussian Mixture Model", "authors":[ "Li, Zhenyu", "Yu, Fan", "Lu, Jie", "Qian, Zhen" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":266 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0456_paper.pdf", "bibtext":"@InProceedings{ Pha_Structural_MICCAI2024,\n author = { Phan, Vu Minh Hieu and Xie, Yutong and Zhang, Bowen and Qi, Yuankai and Liao, Zhibin and Perperidis, Antonios and Phung, Son Lam and Verjans, Johan W.
and To, Minh-Son },\n title = { { Structural Attention: Rethinking Transformer for Unpaired Medical Image Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Unpaired medical image synthesis aims to provide complementary information for accurate clinical diagnostics and to address challenges in obtaining aligned multi-modal medical scans. Transformer-based models excel in image translation tasks thanks to their ability to capture long-range dependencies. Although effective in supervised training, their performance falters in unpaired image synthesis, particularly in synthesizing structural details. This paper empirically demonstrates that, lacking strong inductive biases, Transformers can converge to non-optimal solutions in the absence of paired data. To address this, we introduce the UNet Structured Transformer (UNest) \u2014 a novel architecture incorporating structural inductive biases for unpaired medical image synthesis. We leverage the foundational Segment-Anything Model to precisely extract the foreground structure and perform structural attention within the main anatomy. This guides the model to learn key anatomical regions, thus improving structural synthesis under the lack of supervision in unpaired training. Evaluated on two public datasets spanning three modalities, i.e., MR, CT, and PET, UNest improves recent methods by up to 19.30% across six medical image synthesis tasks. Our code is released at https:\/\/github.com\/HieuPhan33\/MICCAI2024-UNest.", "title":"Structural Attention: Rethinking Transformer for Unpaired Medical Image Synthesis", "authors":[ "Phan, Vu Minh Hieu", "Xie, Yutong", "Zhang, Bowen", "Qi, Yuankai", "Liao, Zhibin", "Perperidis, Antonios", "Phung, Son Lam", "Verjans, Johan W.", "To, Minh-Son" ], "id":"Conference", "arxiv_id":"2406.18967", "GitHub":[ "https:\/\/github.com\/HieuPhan33\/MICCAI2024-UNest" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":267 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3451_paper.pdf", "bibtext":"@InProceedings{ Hus_PromptSmooth_MICCAI2024,\n author = { Hussein, Noor and Shamshad, Fahad and Naseer, Muzammal and Nandakumar, Karthik },\n title = { { PromptSmooth: Certifying Robustness of Medical Vision-Language Models via Prompt Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical vision-language models (Med-VLMs) trained on large datasets of medical image-text pairs and later fine-tuned for specific tasks have emerged as a mainstream paradigm in medical image analysis. However, recent studies have highlighted the susceptibility of these Med-VLMs to adversarial attacks, raising concerns about their safety and robustness. Randomized smoothing is a well-known technique for turning any classifier into a model that is certifiably robust to adversarial perturbations.
However, this approach requires retraining the Med-VLM-based classifier so that it classifies well under Gaussian noise, which is often infeasible in practice. In this paper, we propose a novel framework called PromptSmooth to achieve efficient certified robustness of Med-VLMs by leveraging the concept of prompt learning. Given any pre-trained Med-VLM, PromptSmooth adapts it to handle Gaussian noise by learning textual prompts in a zero-shot or few-shot manner, achieving a delicate balance between accuracy and robustness, while minimizing the computational overhead. Moreover, PromptSmooth requires only a single model to handle multiple noise levels, which substantially reduces the computational cost compared to traditional methods that rely on training a separate model for each noise level. Comprehensive experiments based on three Med-VLMs and across six downstream datasets of various imaging modalities demonstrate the efficacy of PromptSmooth.", "title":"PromptSmooth: Certifying Robustness of Medical Vision-Language Models via Prompt Learning", "authors":[ "Hussein, Noor", "Shamshad, Fahad", "Naseer, Muzammal", "Nandakumar, Karthik" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/nhussein\/promptsmooth" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":268 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3061_paper.pdf", "bibtext":"@InProceedings{ Liu_VolumeNeRF_MICCAI2024,\n author = { Liu, Jiachen and Bai, Xiangzhi },\n title = { { VolumeNeRF: CT Volume Reconstruction from a Single Projection View } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Computed tomography (CT) plays a significant role in clinical practice by providing detailed three-dimensional information, aiding in accurate assessment of various diseases. However, CT imaging requires a large number of X-ray projections from different angles and exposes patients to high doses of radiation. Here we propose VolumeNeRF, based on neural radiance fields (NeRF), for reconstructing CT volumes from a single-view X-ray. During training, our network learns to generate a continuous representation of the CT scan conditioned on the input X-ray image and render an X-ray image similar to the input from the same viewpoint as the input. Considering the ill-posedness and the complexity of the single-perspective generation task, we introduce likelihood images and the average CT images to incorporate prior anatomical knowledge. A novel projection attention module is designed to help the model learn the spatial correspondence between voxels in CT images and pixels in X-ray images during the imaging process. Extensive experiments conducted on a publicly available chest CT dataset show that our VolumeNeRF achieves better performance than other state-of-the-art methods. 
Our code is available at https:\/\/www.github.com\/Aurora132\/VolumeNeRF.", "title":"VolumeNeRF: CT Volume Reconstruction from a Single Projection View", "authors":[ "Liu, Jiachen", "Bai, Xiangzhi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/www.github.com\/Aurora132\/VolumeNeRF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":269 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3539_paper.pdf", "bibtext":"@InProceedings{ Gon_Anatomical_MICCAI2024,\n author = { Goncharov, Mikhail and Samokhin, Valentin and Soboleva, Eugenia and Sokolov, Roman and Shirokikh, Boris and Belyaev, Mikhail and Kurmukov, Anvar and Oseledets, Ivan },\n title = { { Anatomical Positional Embeddings } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"We propose a self-supervised model producing 3D anatomical positional embeddings (APE) of individual medical image voxels. APE encodes voxels\u2019 anatomical closeness, i.e., voxels of the same organ or nearby organs always have closer positional embeddings than the voxels of more distant body parts. In contrast to the existing models of anatomical positional embeddings, our method is able to efficiently produce a map of voxel-wise embeddings for a whole volumetric input image, which makes it an optimal choice for different downstream applications. We train our APE model on 8400 publicly available CT images of abdomen and chest regions. We demonstrate its superior performance compared with the existing models on anatomical landmark retrieval and weakly-supervised few-shot localization of 13 abdominal organs. As a practical application, we show how to cheaply train APE to crop raw CT images to different anatomical regions of interest with 0.99 recall, while reducing the image volume by 10-100 times. The code and the pre-trained APE model are available at https:\/\/github.com\/mishgon\/ape.", "title":"Anatomical Positional Embeddings", "authors":[ "Goncharov, Mikhail", "Samokhin, Valentin", "Soboleva, Eugenia", "Sokolov, Roman", "Shirokikh, Boris", "Belyaev, Mikhail", "Kurmukov, Anvar", "Oseledets, Ivan" ], "id":"Conference", "arxiv_id":"2409.10291", "GitHub":[ "https:\/\/github.com\/mishgon\/ape" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":270 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3156_paper.pdf", "bibtext":"@InProceedings{ Su_SelfPaced_MICCAI2024,\n author = { Su, Junming and Shen, Zhiqiang and Cao, Peng and Yang, Jinzhu and Zaiane, Osmar R. 
},\n title = { { Self-Paced Sample Selection for Barely-Supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"The existing barely-supervised medical image segmentation (BSS) methods, adopting a registration-segmentation paradigm, aim to learn from data with very few annotations to mitigate the extreme label scarcity problem.\nHowever, this paradigm poses a challenge: pseudo-labels generated by image registration come with significant noise. \nTo address this issue, we propose a self-paced sample selection framework (SPSS) for BSS. \nSpecifically, SPSS comprises two main components: 1) self-paced uncertainty sample selection (SU) for explicitly improving the quality of pseudo labels in the image space, and 2) self-paced bidirectional feature contrastive learning (SC) for implicitly improving the quality of pseudo labels through enhancing the separability between class semantics in the feature space. \nBoth SU and SC are trained collaboratively in a self-paced learning manner, ensuring that SPSS can learn from high-quality pseudo labels for BSS. \nExtensive experiments on two public medical image segmentation datasets demonstrate the effectiveness and superiority of SPSS over the state-of-the-art.", "title":"Self-Paced Sample Selection for Barely-Supervised Medical Image Segmentation", "authors":[ "Su, Junming", "Shen, Zhiqiang", "Cao, Peng", "Yang, Jinzhu", "Zaiane, Osmar R." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":271 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1648_paper.pdf", "bibtext":"@InProceedings{ Lu_PathoTune_MICCAI2024,\n author = { Lu, Jiaxuan and Yan, Fang and Zhang, Xiaofan and Gao, Yue and Zhang, Shaoting },\n title = { { PathoTune: Adapting Visual Foundation Model to Pathological Specialists } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"As natural image understanding moves towards the pretrain-finetune era, research in pathology imaging is concurrently evolving. Despite the predominant focus on pretraining pathological foundation models, how to adapt foundation models to downstream tasks is little explored. For downstream adaptation, we propose the existence of two domain gaps, i.e., the Foundation-Task Gap and the Task-Instance Gap. To mitigate these gaps, we introduce PathoTune, a framework designed to efficiently adapt pathological or even visual foundation models to pathology-specific tasks via multi-modal prompt tuning. The proposed framework leverages Task-specific Visual Prompts and Task-specific Textual Prompts to identify task-relevant features, along with Instance-specific Visual Prompts for encoding single pathological image features. Results across multiple datasets at both patch-level and WSI-level demonstrate its superior performance over single-modality prompt tuning approaches. 
Significantly, PathoTune facilitates the direct adaptation of natural visual foundation models to pathological tasks, drastically outperforming pathological foundation models with simple linear probing. The code is available at https:\/\/github.com\/openmedlab\/PathoDuet.", "title":"PathoTune: Adapting Visual Foundation Model to Pathological Specialists", "authors":[ "Lu, Jiaxuan", "Yan, Fang", "Zhang, Xiaofan", "Gao, Yue", "Zhang, Shaoting" ], "id":"Conference", "arxiv_id":"2403.16497", "GitHub":[ "https:\/\/github.com\/openmedlab\/PathoDuet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":272 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2059_paper.pdf", "bibtext":"@InProceedings{ Li_SelfSupervisedContrastive_MICCAI2024,\n author = { Li, Junchi and Wan, Guojia and Liao, Minghui and Liao, Fei and Du, Bo },\n title = { { Self-Supervised Contrastive Graph Views for Learning Neuron-level Circuit Network } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Learning Neuron-level Circuit Network can be used on automatic neuron classification and connection prediction, both of which are fundamental tasks for connectome reconstruction and deciphering brain functions. Traditional approaches to this learning process have relied on extensive neuron typing and labor-intensive proofread. In this paper, we introduce FlyGCL, a self-supervised learning approach designed to automatically learn neuron-level circuit networks, enabling the capture of the connectome\u2019s topological feature. Specifically, we leverage graph augmentation methods to generate various contrastive graph views. The proposed method differentiates between positive and negative samples in these views, allowing it to encode the structural representation of neurons as adaptable latent features that can be used for downstream tasks such as neuron classification and connection prediction. To evaluate our method, we construct two new Neuron-level Circuit Network datasets, named HemiBrain-C and Manc-C, derived from the FlyEM project. Experimental results show that FlyGCL attains neuron classification accuracies of 73.8% and 57.4%, respectively, with >0.95 AUC in connection prediction tasks. 
Our code and data are available on GitHub at https:\/\/github.com\/mxz12119\/FlyGCL.", "title":"Self-Supervised Contrastive Graph Views for Learning Neuron-level Circuit Network", "authors":[ "Li, Junchi", "Wan, Guojia", "Liao, Minghui", "Liao, Fei", "Du, Bo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mxz12119\/FlyGCL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":273 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3901_paper.pdf", "bibtext":"@InProceedings{ Xu_Simultaneous_MICCAI2024,\n author = { Xu, Yushen and Li, Xiaosong and Jie, Yuchan and Tan, Haishu },\n title = { { Simultaneous Tri-Modal Medical Image Fusion and Super-Resolution using Conditional Diffusion Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"In clinical practice, tri-modal medical image fusion, compared to the existing dual-modal technique, can provide a more comprehensive view of the lesions, aiding physicians in evaluating the disease\u2019s shape, location, and biological activity. However, due to the limitations of imaging equipment and considerations for patient safety, the quality of medical images is usually limited, leading to sub-optimal fusion performance, and affecting the depth of image analysis by the physician. Thus, there is an urgent need for a technology that can both enhance image resolution and integrate multi-modal information. Although current image processing methods can effectively address image fusion and super-resolution individually, solving both problems synchronously remains extremely challenging. In this paper, we propose TFS-Diff, a model that simultaneously realizes tri-modal medical image fusion and super-resolution. Specifically, TFS-Diff is based on diffusion-model generation via a random iterative denoising process. We also develop a simple objective function, the proposed fusion super-resolution loss, which effectively evaluates the uncertainty in the fusion and ensures the stability of the optimization process. A channel attention module is proposed to effectively integrate key information from different modalities for clinical diagnosis, avoiding information loss caused by multiple image processing. Extensive experiments on public Harvard datasets show that TFS-Diff significantly surpasses the existing state-of-the-art methods in both quantitative and visual evaluations. 
Code is available at https:\/\/github.com\/XylonXu01\/TFS-Diff.", "title":"Simultaneous Tri-Modal Medical Image Fusion and Super-Resolution using Conditional Diffusion Model", "authors":[ "Xu, Yushen", "Li, Xiaosong", "Jie, Yuchan", "Tan, Haishu" ], "id":"Conference", "arxiv_id":"2404.17357", "GitHub":[ "https:\/\/github.com\/XylonXu01\/TFS-Diff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":274 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0424_paper.pdf", "bibtext":"@InProceedings{ Wol_Binary_MICCAI2024,\n author = { Wolleb, Julia and Bieder, Florentin and Friedrich, Paul and Zhang, Peter and Durrer, Alicia and Cattin, Philippe C. },\n title = { { Binary Noise for Binary Tasks: Masked Bernoulli Diffusion for Unsupervised Anomaly Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"The high performance of denoising diffusion models for image generation has also paved the way for their application in unsupervised medical anomaly detection.\nAs diffusion-based methods require a lot of GPU memory and have long sampling times, we present a novel and fast unsupervised anomaly detection approach based on latent Bernoulli diffusion models. We first apply an autoencoder to compress the input images into a binary latent representation. Next, a diffusion model that follows a Bernoulli noise schedule is employed to this latent space and trained to restore binary latent representations from perturbed ones. The binary nature of this diffusion model allows us to identify entries in the latent space that have a high probability of flipping their binary code during the denoising process, which indicates out-of-distribution data. We propose a masking algorithm based on these probabilities, which improves the anomaly detection scores. We achieve state-of-the-art performance compared to other diffusion-based unsupervised anomaly detection algorithms while significantly reducing sampling time and memory consumption. The code is available at https:\/\/github.com\/JuliaWolleb\/Anomaly_berdiff.", "title":"Binary Noise for Binary Tasks: Masked Bernoulli Diffusion for Unsupervised Anomaly Detection", "authors":[ "Wolleb, Julia", "Bieder, Florentin", "Friedrich, Paul", "Zhang, Peter", "Durrer, Alicia", "Cattin, Philippe C." ], "id":"Conference", "arxiv_id":"2403.11667", "GitHub":[ "https:\/\/github.com\/JuliaWolleb\/Anomaly_berdiff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":275 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0208_paper.pdf", "bibtext":"@InProceedings{ Min_Biomechanicsinformed_MICCAI2024,\n author = { Min, Zhe and Baum, Zachary M. C. and Saeed, Shaheer Ullah and Emberton, Mark and Barratt, Dean C. and Taylor, Zeike A. 
and Hu, Yipeng },\n title = { { Biomechanics-informed Non-rigid Medical Image Registration and its Inverse Material Property Estimation with Linear and Nonlinear Elasticity } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"This paper investigates both biomechanical-constrained non-rigid medical image registrations and accurate identifications of material properties for soft tissues, using physics-informed neural networks (PINNs). The complex nonlinear elasticity theory is leveraged to formally establish the partial differential equations (PDEs) representing physics laws of biomechanical constraints that need to be satisfied, with which registration and identification tasks are treated as forward (i.e., data-driven solutions of PDEs) and inverse (i.e., parameter estimation) problems under PINNs respectively. Two net configurations (i.e., Cfg1 and Cfg2) have also been compared for both linear and nonlinear physics model. Two sets of experiments have been conducted, using pairs of undeformed and deformed MR images from clinical cases of prostate cancer biopsy.", "title":"Biomechanics-informed Non-rigid Medical Image Registration and its Inverse Material Property Estimation with Linear and Nonlinear Elasticity", "authors":[ "Min, Zhe", "Baum, Zachary M. C.", "Saeed, Shaheer Ullah", "Emberton, Mark", "Barratt, Dean C.", "Taylor, Zeike A.", "Hu, Yipeng" ], "id":"Conference", "arxiv_id":"2407.03292", "GitHub":[ "https:\/\/github.com\/zhemin-1992\/registration_pinns" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":276 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2022_paper.pdf", "bibtext":"@InProceedings{ Hu_DCoRP_MICCAI2024,\n author = { Hu, Haoyu and Zhang, Hongrun and Li, Chao },\n title = { { D-CoRP: Differentiable Connectivity Refinement for Functional Brain Networks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Brain network is an important tool for understanding the brain, offering insights for scientific research and clinical diagnosis. Existing models for brain networks typically primarily focus on brain regions or overlook the complexity of brain connectivities. MRI-derived brain network data is commonly susceptible to connectivity noise, underscoring the necessity of incorporating connectivities into the modeling of brain networks. To address this gap, we introduce a differentiable module for refining brain connectivity. We develop the multivariate optimization based on information bottleneck theory to address the complexity of the brain network and filter noisy or redundant connections. Also, our method functions as a flexible plugin that is adaptable to most graph neural networks. 
Our extensive experimental results show that the proposed method can significantly improve the performance of various baseline models and outperform other state-of-the-art methods, indicating the effectiveness and generalizability of the proposed method in refining brain network connectivity. The code is available at https:\/\/github.com\/Fighting-HHY\/D-CoRP.", "title":"D-CoRP: Differentiable Connectivity Refinement for Functional Brain Networks", "authors":[ "Hu, Haoyu", "Zhang, Hongrun", "Li, Chao" ], "id":"Conference", "arxiv_id":"2405.18658", "GitHub":[ "https:\/\/github.com\/Fighting-HHY\/D-CoRP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":277 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0885_paper.pdf", "bibtext":"@InProceedings{ Zha_Implicit_MICCAI2024,\n author = { Zhang, Minghui and Zhang, Hanxiao and You, Xin and Yang, Guang-Zhong and Gu, Yun },\n title = { { Implicit Representation Embraces Challenging Attributes of Pulmonary Airway Tree Structures } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"High-fidelity modeling of the pulmonary airway tree from CT scans is critical to preoperative planning. However, the granularity of CT scan resolutions and the intricate topologies limit the accuracy of manual or deep-learning-based delineation of airway structures, resulting in coarse representation accompanied by spike-like noises and disconnectivity issues. To address these challenges, we introduce a Deep Geometric Correspondence Implicit (DGCI) network that implicitly models airway tree structures in the continuous space rather than discrete voxel grids. DGCI first explores the intrinsic topological features shared within different airway cases on top of implicit neural representation (INR). Specifically, we establish a reversible correspondence flow to constrain the feature space of training shapes. Moreover, implicit geometric regularization is utilized to promote a smooth and high-fidelity representation of fine-scaled airway structures. By transcending voxel-based representation, DGCI acquires topological insights and integrates geometric regularization into INR, generating airway tree structures with state-of-the-art topological fidelity. Detailed evaluation results on the public dataset demonstrated the superiority of the DGCI in the scalable delineation of airways and downstream applications. 
Source codes can be found at: https:\/\/github.com\/EndoluminalSurgicalVision-IMR\/DGCI.", "title":"Implicit Representation Embraces Challenging Attributes of Pulmonary Airway Tree Structures", "authors":[ "Zhang, Minghui", "Zhang, Hanxiao", "You, Xin", "Yang, Guang-Zhong", "Gu, Yun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/EndoluminalSurgicalVision-IMR\/DGCI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":278 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0770_paper.pdf", "bibtext":"@InProceedings{ Cec_URCDM_MICCAI2024,\n author = { Cechnicka, Sarah and Ball, James and Baugh, Matthew and Reynaud, Hadrien and Simmonds, Naomi and Smith, Andrew P.T. and Horsfield, Catherine and Roufosse, Candice and Kainz, Bernhard },\n title = { { URCDM: Ultra-Resolution Image Synthesis in Histopathology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diagnosing medical conditions from histopathology data requires a thorough analysis across the various resolutions of Whole Slide Images (WSI). However, existing generative methods fail to consistently represent the hierarchical structure of WSIs due to a focus on high-fidelity patches. To tackle this, we propose Ultra-Resolution Cascaded Diffusion Models (URCDMs) which are capable of synthesising entire histopathology images at high resolutions whilst authentically capturing the details of both the underlying anatomy and pathology at all magnification levels. We evaluate our method on three separate datasets, consisting of brain, breast and kidney tissue, and surpass existing state-of-the-art multi-resolution models. Furthermore, an expert evaluation study was conducted, demonstrating that URCDMs consistently generate outputs across various resolutions that trained evaluators cannot distinguish from real images. 
All code and additional examples can be found on GitHub.", "title":"URCDM: Ultra-Resolution Image Synthesis in Histopathology", "authors":[ "Cechnicka, Sarah", "Ball, James", "Baugh, Matthew", "Reynaud, Hadrien", "Simmonds, Naomi", "Smith, Andrew P.T.", "Horsfield, Catherine", "Roufosse, Candice", "Kainz, Bernhard" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/scechnicka\/URCDM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":279 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2051_paper.pdf", "bibtext":"@InProceedings{ Liu_Generating_MICCAI2024,\n author = { Liu, Zeyu and Zhang, Tianyi and He, Yufang and Zhang, Guanglei },\n title = { { Generating Progressive Images from Pathological Transitions via Diffusion Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Pathological image analysis is a crucial field in deep learning applications. However, training effective models demands large-scale annotated data, which faces challenges due to sampling and annotation scarcity. The rapid developing generative models show potential to generate more training samples in recent studies. However, they also struggle with generalization diversity when limited training data is available, making them incapable of generating effective samples. Inspired by pathological transitions between different stages, we propose an adaptive depth-controlled diffusion (ADD) network for effective data augmentation. This novel approach is rooted in domain migration, where a hybrid attention strategy blends local and global attention priorities. With feature measuring, the adaptive depth-controlled strategy guides the bidirectional diffusion. It simulates pathological feature transition and maintains locational similarity. Based on a tiny training set (samples \u2264 500), ADD yields cross-domain progressive images with corresponding soft labels. 
Experiments on two datasets suggest significant improvements in generation diversity, and the effectiveness of the generated progressive samples is highlighted in downstream classification tasks.", "title":"Generating Progressive Images from Pathological Transitions via Diffusion Model", "authors":[ "Liu, Zeyu", "Zhang, Tianyi", "He, Yufang", "Zhang, Guanglei" ], "id":"Conference", "arxiv_id":"2311.12316", "GitHub":[ "https:\/\/github.com\/Rowerliu\/ADD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":280 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1347_paper.pdf", "bibtext":"@InProceedings{ Zha_Fundus2Video_MICCAI2024,\n author = { Zhang, Weiyi and Huang, Siyu and Yang, Jiancheng and Chen, Ruoyu and Ge, Zongyuan and Zheng, Yingfeng and Shi, Danli and He, Mingguang },\n title = { { Fundus2Video: Cross-Modal Angiography Video Generation from Static Fundus Photography with Clinical Knowledge Guidance } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Fundus Fluorescein Angiography (FFA) is a critical tool for assessing retinal vascular dynamics and aiding in the diagnosis of eye diseases. However, its invasive nature and less accessibility compared to Color Fundus (CF) images pose significant challenges. Current CF to FFA translation methods are limited to static generation. In this work, we pioneer dynamic FFA video generation from static CF images. We introduce an autoregressive GAN for smooth, memory-saving frame-by-frame FFA synthesis. To enhance the focus on dynamic lesion changes in FFA regions, we design a knowledge mask based on clinical experience. Leveraging this mask, our approach integrates innovative knowledge mask-guided techniques, including knowledge-boosted attention, knowledge-aware discriminators, and mask-enhanced patch-NCE loss, aimed at refining generation in critical areas and addressing the pixel misalignment challenge. Our method achieves the best FVD of 1503.21 and PSNR of 11.81 compared to other common video generation approaches. Human assessment by an ophthalmologist confirms its high generation quality. Notably, our knowledge mask surpasses supervised lesion segmentation masks, offering a promising non-invasive alternative to traditional FFA for research and clinical applications. 
The code is available at https:\/\/github.com\/Michi-3000\/Fundus2Video.", "title":"Fundus2Video: Cross-Modal Angiography Video Generation from Static Fundus Photography with Clinical Knowledge Guidance", "authors":[ "Zhang, Weiyi", "Huang, Siyu", "Yang, Jiancheng", "Chen, Ruoyu", "Ge, Zongyuan", "Zheng, Yingfeng", "Shi, Danli", "He, Mingguang" ], "id":"Conference", "arxiv_id":"2408.15217", "GitHub":[ "https:\/\/github.com\/Michi-3000\/Fundus2Video" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":281 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2020_paper.pdf", "bibtext":"@InProceedings{ Cai_BPaCo_MICCAI2024,\n author = { Cai, Zhiyuan and Wei, Tianyunxi and Lin, Li and Chen, Hao and Tang, Xiaoying },\n title = { { BPaCo: Balanced Parametric Contrastive Learning for Long-tailed Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image classification is an essential medical image analysis tasks. However, due to data scarcity of rare diseases in clinical scenarios, the acquired medical image datasets may exhibit long-tailed distributions. Previous works employ class re-balancing to address this issue yet the representation is usually not discriminative enough. Inspired by contrastive learning\u2019s power in representation learning, in this paper, we propose and validate a contrastive learning based framework, named Balanced Parametric Contrastive learning (BPaCo), to tackle long-tailed medical image classification. There are three key components in BPaCo: across-batch class-averaging to balance the gradient contribution from negative classes; hybrid class-complement to have all classes appear in every mini-batch for discriminative prototypes; cross-entropy logit compensation to \nformulate an end-to-end classification framework with even stronger feature representations. 
Our BPaCo shows outstanding classification performance and high computational efficiency on three highly-imbalanced medical image classification datasets.", "title":"BPaCo: Balanced Parametric Contrastive Learning for Long-tailed Medical Image Classification", "authors":[ "Cai, Zhiyuan", "Wei, Tianyunxi", "Lin, Li", "Chen, Hao", "Tang, Xiaoying" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Davidczy\/BPaCo" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":282 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0727_paper.pdf", "bibtext":"@InProceedings{ Zho_ccRCC_MICCAI2024,\n author = { Zhou, Huijian and Tian, Zhiqiang and Han, Xiangmin and Du, Shaoyi and Gao, Yue },\n title = { { ccRCC Metastasis Prediction via Exploring High-Order Correlations on Multiple WSIs } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Metastasis prediction based on gigapixel histopathology whole-slide images (WSIs) is crucial for early diagnosis and clinical decision-making of clear cell renal cell carcinoma (ccRCC). However, most existing methods focus on extracting task-related features from a single WSI, while ignoring the correlations among WSIs, which is important for metastasis prediction when a single patient has multiple pathological slides. In this case, we propose a multi-slice-based hypergraph computation (MSHGC) method for metastasis prediction, which considers the intra-correlations within a single WSI and cross-correlations among multiple WSIs of a single patient simultaneously. Specifically, intra-correlations are captured within both topology and semantic feature spaces, while cross-correlations are modeled between the patches from different WSIs. Finally, the attention mechanism is used to suppress the contribution of task-irrelevant patches and enhance the contribution of task-relevant patches. 
MSHGC achieves the C-index of 0.8441 and 0.8390 on two carcinoma datasets(namely H1 and H2), outperforming state-of-the-art methods, which demonstrates the effectiveness of the proposed MSHGC.", "title":"ccRCC Metastasis Prediction via Exploring High-Order Correlations on Multiple WSIs", "authors":[ "Zhou, Huijian", "Tian, Zhiqiang", "Han, Xiangmin", "Du, Shaoyi", "Gao, Yue" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":283 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0244_paper.pdf", "bibtext":"@InProceedings{ He_OpenSet_MICCAI2024,\n author = { He, Along and Li, Tao and Zhao, Yitian and Zhao, Junyong and Fu, Huazhu },\n title = { { Open-Set Semi-Supervised Medical Image Classification with Learnable Prototypes and Outlier Filter } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised learning (SSL) offers a pragmatic approach to harnessing unlabeled data, particularly in contexts where annotation costs are prohibitively high. However, in practical clinical settings, unlabeled datasets inevitably encompass outliers that do not align with labeled classes, constituting what is known as open-set Semi-supervised learning (OSSL). While existing methods have shown promising results in domains such as natural image processing, they often overlook the nuanced characteristics intrinsic to medical images, rendering them less applicable in this domain.\nIn this work, we introduce a novel framework tailored for the nuanced challenges of \\textbf{open}-set \\textbf{s}emi-\\textbf{s}upervised \\textbf{c}lassification (OpenSSC) in medical imaging. OpenSSC comprises three integral components. Firstly, we propose the utilization of learnable prototypes to distill a compact representation of the fine-grained characteristics inherent in identified classes. Subsequently, a multi-binary discriminator is introduced to consolidate closed-set predictions and effectively delineate whether the sample belongs to its ground truth or not. Building upon these components, we present a joint outlier filter mechanism designed to classify known classes while discerning and identifying unknown classes within unlabeled datasets. 
Our proposed method demonstrates efficacy in handling open-set data.\nExtensive experimentation validates the effectiveness of our approach, showcasing superior performance compared to existing state-of-the-art methods in two distinct medical image classification tasks.", "title":"Open-Set Semi-Supervised Medical Image Classification with Learnable Prototypes and Outlier Filter", "authors":[ "He, Along", "Li, Tao", "Zhao, Yitian", "Zhao, Junyong", "Fu, Huazhu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":284 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0697_paper.pdf", "bibtext":"@InProceedings{ Xu_PolypMamba_MICCAI2024,\n author = { Xu, Zhongxing and Tang, Feilong and Chen, Zhe and Zhou, Zheng and Wu, Weishan and Yang, Yuyao and Liang, Yu and Jiang, Jiyu and Cai, Xuyue and Su, Jionglong },\n title = { { Polyp-Mamba: Polyp Segmentation with Visual Mamba } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate segmentation of polyps is crucial for efficient colorectal cancer detection during the colonoscopy screenings. State Space Models, exemplified by Mamba, have recently emerged as a promising approach, excelling in long-range interaction modeling with linear computational complexity. However, previous methods do not consider the cross-scale dependencies of different pixels and the consistency in feature representations and semantic embedding, which are crucial for polyp segmentation. Therefore, we introduce Polyp-Mamba, a novel unified framework aimed at overcoming the above limitations by integrating multi-scale feature learning with semantic structure analysis. Specifically, our framework includes a Scale-Aware Semantic module that enables the embedding of multi-scale features from the encoder to achieve semantic information modeling across both intra- and inter-scales, rather than the single-scale approach employed in prior studies. Furthermore, the Global Semantic Injection module is deployed to inject scale-aware semantics into the corresponding decoder features, aiming to fuse global and local information and enhance pyramid feature representation. 
Experimental results across five challenging datasets and six metrics demonstrate that our proposed method not only surpasses state-of-the-art methods but also sets a new benchmark in the field, underscoring the Polyp-Mamba framework\u2019s exceptional proficiency in the polyp segmentation tasks.", "title":"Polyp-Mamba: Polyp Segmentation with Visual Mamba", "authors":[ "Xu, Zhongxing", "Tang, Feilong", "Chen, Zhe", "Zhou, Zheng", "Wu, Weishan", "Yang, Yuyao", "Liang, Yu", "Jiang, Jiyu", "Cai, Xuyue", "Su, Jionglong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":285 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1458_paper.pdf", "bibtext":"@InProceedings{ Li_CacheDriven_MICCAI2024,\n author = { Li, Xiang and Fang, Huihui and Wang, Changmiao and Liu, Mingsi and Duan, Lixin and Xu, Yanwu },\n title = { { Cache-Driven Spatial Test-Time Adaptation for Cross-Modality Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Test-Time Adaptation (TTA) shows promise for addressing the domain gap between source and target modalities in medical image segmentation methods. Furthermore, TTA enables the model to quickly fine-tune itself during testing, enabling it to adapt to the continuously evolving data distribution in the medical clinical environment. Consequently, we introduce Spatial Test-Time Adaptation (STTA), for the first time considering the integration of inter-slice spatial information from 3D volumes with TTA. The continuously changing distribution of slice data in the target domain can lead to error accumulation and catastrophic forgetting. To tackle these challenges, we first propose reducing error accumulation by using an ensemble of multi-head predictions based on data augmentation. Secondly, for pixels with unreliable pseudo-labels, regularization is applied through entropy minimization on the ensemble of predictions from multiple heads. Finally, to prevent catastrophic forgetting, we suggest using a cache mechanism during testing to restore neuron weights from the source pre-trained model, thus effectively preserving source knowledge. The proposed STTA has been bidirectionally validated across modalities in abdominal multi-organ and brain tumor datasets, achieving a relative increase of approximately 13\\% in the Dice value in the best-case scenario compared to SOTA methods. 
The code is available at: https:\/\/github.com\/lixiang007666\/STTA.", "title":"Cache-Driven Spatial Test-Time Adaptation for Cross-Modality Medical Image Segmentation", "authors":[ "Li, Xiang", "Fang, Huihui", "Wang, Changmiao", "Liu, Mingsi", "Duan, Lixin", "Xu, Yanwu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/lixiang007666\/STTA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":286 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1793_paper.pdf", "bibtext":"@InProceedings{ Zha_DTCA_MICCAI2024,\n author = { Zhang, Xiaoshan and Shi, Enze and Yu, Sigang and Zhang, Shu },\n title = { { DTCA: Dual-Branch Transformer with Cross-Attention for EEG and Eye Movement Data Fusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The integration of EEG and eye movements (EM) provides a comprehensive understanding of brain dynamics, yet effectively capturing key information from EEG and EM presents challenges. To overcome these, we propose DTCA, a novel multimodal fusion framework. It encodes EEG and EM data into a latent space, leveraging a multimodal fusion module to learn the facilitative information and dynamic relationships between EEG and EM data. Utilizing cross-attention with pooling computation, DTCA captures the complementary features and aggregates promoted information. Extensive experiments on multiple open datasets show that DTCA outperforms previous state-of-the-art methods: 99.15% on SEED, 99.65% on SEED-IV, and 86.05% on SEED-V datasets. We also visualize confusion matrices and features to demonstrate how DTCA works. Our findings demonstrate that (1) EEG and EM effectively distinguish changes in brain states during tasks such as watching videos. (2) Encoding EEG and EM into a latent space for fusion facilitates learning promoted information and dynamic relationships associated with brain states. 
(3) DTCA efficiently fuses EEG and EM data to leverage their synergistic effects in understanding the brain\u2019s dynamic processes and classifying brain states.", "title":"DTCA: Dual-Branch Transformer with Cross-Attention for EEG and Eye Movement Data Fusion", "authors":[ "Zhang, Xiaoshan", "Shi, Enze", "Yu, Sigang", "Zhang, Shu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":287 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0906_paper.pdf", "bibtext":"@InProceedings{ Liu_MOST_MICCAI2024,\n author = { Liu, Xinyu and Chen, Zhen and Yuan, Yixuan },\n title = { { MOST: Multi-Formation Soft Masking for Semi-Supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"In semi-supervised medical image segmentation (SSMIS), existing methods typically impose consistency or contrastive regularizations under basic data and network perturbations, and individually segment each voxel\/pixel in the image. In fact, a dominating issue in medical scans is the intrinsic ambiguous regions due to unclear boundary and expert variability, whose segmentation requires the information in spatially nearby regions. Thus, these existing works are limited in data variety and tend to overlook the ability of inferring ambiguous regions with contextual information. To this end, we present Multi-Formation Soft Masking (MOST), a simple framework that effectively boosts SSMIS by learning spatial context relations with data regularity conditions. It first applies multi-formation function to enhance the data variety and perturbation space via partitioning and upsampling. Afterwards, each unlabeled data is soft-masked and is constrained to give invariant predictions as the original data. Therefore, the model is encouraged to infer ambiguous regions via varied granularities of contextual information conditions. Despite its simplicity, MOST achieves state-of-the-art performance on four common SSMIS benchmarks. Code and models will be released.", "title":"MOST: Multi-Formation Soft Masking for Semi-Supervised Medical Image Segmentation", "authors":[ "Liu, Xinyu", "Chen, Zhen", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/MOST-SSL4MIS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":288 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1517_paper.pdf", "bibtext":"@InProceedings{ Ma_Weakly_MICCAI2024,\n author = { Ma, Qiang and Li, Liu and Robinson, Emma C. 
and Kainz, Bernhard and Rueckert, Daniel },\n title = { { Weakly Supervised Learning of Cortical Surface Reconstruction from Segmentations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Existing learning-based cortical surface reconstruction approaches heavily rely on the supervision of pseudo ground truth (pGT) cortical surfaces for training. Such pGT surfaces are generated by traditional neuroimage processing pipelines, which are time consuming and difficult to generalize well to low-resolution brain MRI, e.g., from fetuses and neonates. In this work, we present CoSeg, a learning-based cortical surface reconstruction framework weakly supervised by brain segmentations without the need for pGT surfaces. CoSeg introduces temporal attention networks to learn time-varying velocity fields from brain MRI for diffeomorphic surface deformations, which fit an initial surface to target cortical surfaces within only 0.11 seconds for each brain hemisphere. A weakly supervised loss is designed to reconstruct pial surfaces by inflating the white surface along the normal direction towards the boundary of the cortical gray matter segmentation. This alleviates partial volume effects and encourages the pial surface to deform into deep and challenging cortical sulci. We evaluate CoSeg on 1,113 adult brain MRI at 1mm and 2mm resolution. CoSeg achieves superior geometric and morphological accuracy compared to existing learning-based approaches. We also verify that CoSeg can extract high-quality cortical surfaces from fetal brain MRI on which traditional pipelines fail to produce acceptable results.", "title":"Weakly Supervised Learning of Cortical Surface Reconstruction from Segmentations", "authors":[ "Ma, Qiang", "Li, Liu", "Robinson, Emma C.", "Kainz, Bernhard", "Rueckert, Daniel" ], "id":"Conference", "arxiv_id":"2406.12650", "GitHub":[ "https:\/\/github.com\/m-qiang\/CoSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":289 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2877_paper.pdf", "bibtext":"@InProceedings{ Zha_Lost_MICCAI2024,\n author = { Zhao, Yidong and Zhang, Yi and Simonetti, Orlando and Han, Yuchi and Tao, Qian },\n title = { { Lost in Tracking: Uncertainty-guided Cardiac Cine MRI Segmentation at Right Ventricle Base } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate biventricular segmentation of cardiac magnetic resonance (CMR) cine images is essential for the clinical evaluation of heart function. Deep-learning-based methods have achieved highly accurate segmentation performance, however, compared to left ventricle (LV), right ventricle (RV) segmentation is still more challenging and less reproducible. The degenerated performance frequently occurs at the RV base, where the in-plane anatomical structures are complex (with atria, valve, and aorta), and varying due to the strong inter-planar motion. 
In this work, we propose to tackle the currently unsolved issues in CMR segmentation, specifically at the RV base, with two strategies: first, we complemented the public resource by re-annotating the RV base in the ACDC dataset, with refined delineation of the right ventricle outflow tract (RVOT), under the guidance of an expert cardiologist. Second, we proposed a novel Dual-Encoder U-Net architecture that leverages temporal incoherence to inform the segmentation when inter-planar motions occur. The inter-planar motion is characterized by loss-of-tracking, via Bayesian uncertainty of a motion-tracking model. Our experiments showed that our method significantly improved the RV base segmentation by taking temporal incoherence into account. Additionally, we investigated the reproducibility of deep-learning-based segmentation and showed that the combination of consistent annotation and loss-of-tracking could enhance RV segmentation reproducibility, potentially facilitating a large number of clinical studies focusing on RV.", "title":"Lost in Tracking: Uncertainty-guided Cardiac Cine MRI Segmentation at Right Ventricle Base", "authors":[ "Zhao, Yidong", "Zhang, Yi", "Simonetti, Orlando", "Han, Yuchi", "Tao, Qian" ], "id":"Conference", "arxiv_id":"2410.03320", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":290 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2733_paper.pdf", "bibtext":"@InProceedings{ Jin_Debiased_MICCAI2024,\n author = { Jin, Ruinan and Deng, Wenlong and Chen, Minghui and Li, Xiaoxiao },\n title = { { Debiased Noise Editing on Foundation Models for Fair Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the era of Foundation Models\u2019 (FMs) rising prominence in AI, our study addresses the challenge of biases in medical images while the model operates in black-box (e.g., using FM API), particularly spurious correlations between pixels and sensitive attributes. Traditional methods for bias mitigation face limitations due to the restricted access to web-hosted FMs and difficulties in addressing the underlying bias encoded within the FM API. We propose a D(ebiased) N(oise) E(diting) strategy, termed DNE, which generates DNE to mask such spurious correlation. DNE is capable of mitigating bias both within the FM API embedding and the images themselves. Furthermore, DNE is suitable for both white-box and black-box FM APIs, where we introduced G(reedy) (Z)eroth-order) Optimization (GeZO) for it when the gradient is inaccessible in black-box APIs. Our whole pipeline enables fairness-aware image editing that can be applied across various medical contexts without requiring direct model manipulation or significant computational resources. Our empirical results demonstrate the method\u2019s effectiveness in maintaining fairness and utility across different patient groups and diseases. 
In the era of AI-driven medicine, this work contributes to making healthcare diagnostics more equitable, showcasing a practical solution for bias mitigation in pre-trained image FMs.", "title":"Debiased Noise Editing on Foundation Models for Fair Medical Image Classification", "authors":[ "Jin, Ruinan", "Deng, Wenlong", "Chen, Minghui", "Li, Xiaoxiao" ], "id":"Conference", "arxiv_id":"2403.06104", "GitHub":[ "https:\/\/github.com\/ubc-tea\/DNE-foundation-model-fairness" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":291 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3324_paper.pdf", "bibtext":"@InProceedings{ Lia_Overcoming_MICCAI2024,\n author = { Liang, Qinghao and Adkinson, Brendan D. and Jiang, Rongtao and Scheinost, Dustin },\n title = { { Overcoming Atlas Heterogeneity in Federated Learning for Cross-site Connectome-based Predictive Modeling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Data-sharing in neuroimaging research alleviates the cost and time constraints of collecting large sample sizes at a single location, aiding the development of foundational models with deep learning. Yet, challenges to data sharing, such as data privacy, ownership, and regulatory compliance, exist. Federated learning enables collaborative training across sites while addressing many of these concerns. Connectomes are a promising data type for data sharing and creating foundational models. Yet, the field lacks a single, standardized atlas for constructing connectomes. Connectomes are incomparable between these atlases, limiting the utility of connectomes in federated learning. Further, fully reprocessing raw data in a single pipeline is not a solution when sample sizes range in the 10\u2013100\u2019s of thousands. Dedicated frameworks are needed to efficiently harmonize previously processed connectomes from various atlases for federated learning. We present Federate Learning for Existing Connectomes from Heterogeneous Atlases (FLECHA) to addresses these challenges. FLECHA learns a mapping between atlas spaces on an independent dataset, enabling the transformation of connectomes to a common target space before federated learning. We assess FLECHA using functional and structural connectomes processed with five atlases from the Human Connectome Project. Our results show improved prediction performance for FLECHA. 
They also demonstrate the potential of FLECHA to generalize connectome-based models across diverse silos, potentially enhancing the application of deep learning in neuroimaging.", "title":"Overcoming Atlas Heterogeneity in Federated Learning for Cross-site Connectome-based Predictive Modeling", "authors":[ "Liang, Qinghao", "Adkinson, Brendan D.", "Jiang, Rongtao", "Scheinost, Dustin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/qinghaoliang\/Federated-learning_across_atlases" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":292 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1495_paper.pdf", "bibtext":"@InProceedings{ Lee_COVID19_MICCAI2024,\n author = { Lee, Jong Bub and Kim, Jung Soo and Lee, Hyun Gyu },\n title = { { COVID19 to Pneumonia: Multi Region Lung Severity Classification using CNN Transformer Position-Aware Feature Encoding Network } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"This study investigates utilizing chest X-ray (CXR) data from COVID-19 patients for classifying pneumonia severity, aiming to enhance prediction accuracy in COVID-19 datasets and achieve robust classification across diverse pneumonia cases. A novel CNN-Transformer hybrid network has been developed, leveraging position-aware features and Region Shared MLPs for integrating lung region information. This improves adaptability to different spatial resolutions and scores, addressing the subjectivity of severity assessment due to unclear clinical measurements. The model shows significant improvement in pneumonia severity classification for both COVID-19 and heterogeneous pneumonia datasets. Its adaptable structure allows seamless integration with various backbone models, leading to continuous performance improvement and potential clinical applications, particularly in intensive care units.", "title":"COVID19 to Pneumonia: Multi Region Lung Severity Classification using CNN Transformer Position-Aware Feature Encoding Network", "authors":[ "Lee, Jong Bub", "Kim, Jung Soo", "Lee, Hyun Gyu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/blind4635\/Multi-Region-Lung-Severity-PAFE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":293 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3866_paper.pdf", "bibtext":"@InProceedings{ Thr_TESSL_MICCAI2024,\n author = { Thrasher, Jacob and Devkota, Alina and Tafti, Ahmad P. 
and Bhattarai, Binod and Gyawali, Prashnna and the Alzheimer\u2019s Disease Neuroimaging Initiative },\n title = { { TE-SSL: Time and Event-aware Self Supervised Learning for Alzheimer\u2019s Disease Progression Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Alzheimer\u2019s Disease (AD) represents one of the most pressing challenges in the field of neurodegenerative disorders, with its progression analysis being crucial for understanding disease dynamics and developing targeted interventions. Recent advancements in deep learning and various representation learning strategies, including self-supervised learning (SSL), have shown significant promise in enhancing medical image analysis, providing innovative ways to extract meaningful patterns from complex data. Notably, the computer vision literature has demonstrated that incorporating supervisory signals into SSL can further augment model performance by guiding the learning process with additional relevant information. However, the application of such supervisory signals in the context of disease progression analysis remains largely unexplored. This gap is particularly pronounced given the inherent challenges of incorporating both event and time-to-event information into the learning paradigm. Addressing this, we propose a novel framework, Time and Event-aware SSL (TE-SSL), which integrates time-to-event and event and data as supervisory signals to refine the learning process. Our comparative analysis with existing SSL-based methods in the downstream task of survival analysis shows superior performance across standard metrics.", "title":"TE-SSL: Time and Event-aware Self Supervised Learning for Alzheimer\u2019s Disease Progression Analysis", "authors":[ "Thrasher, Jacob", "Devkota, Alina", "Tafti, Ahmad P.", "Bhattarai, Binod", "Gyawali, Prashnna", "the Alzheimer\u2019s Disease Neuroimaging Initiative" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/jacob-thrasher\/TE-SSL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":294 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1426_paper.pdf", "bibtext":"@InProceedings{ Kim_SSYNTH_MICCAI2024,\n author = { Kim, Andrea and Saharkhiz, Niloufar and Sizikova, Elena and Lago, Miguel and Sahiner, Berkman and Delfino, Jana and Badano, Aldo },\n title = { { S-SYNTH: Knowledge-Based, Synthetic Generation of Skin Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Development of artificial intelligence (AI) techniques in medical imaging requires access to large-scale and diverse datasets for training and evaluation. In dermatology, obtaining such datasets remains challenging due to significant variations in patient populations, illumination conditions, and acquisition system characteristics. 
In this work, we propose S-SYNTH, the first knowledge-based, adaptable open-source skin simulation framework to rapidly generate synthetic skin, 3D models and digitally rendered images, using an anatomically inspired multi-layer, multi-component skin and growing lesion model. The skin model allows for controlled variation in skin appearance, such as skin color, presence of hair, lesion shape, and blood fraction among other parameters. We use this framework to study the effect of possible variations on the development and evaluation of AI models for skin lesion segmentation, and show that results obtained using synthetic data follow similar comparative trends as real dermatologic images, while mitigating biases and limitations from existing datasets including small dataset size, lack of diversity, and underrepresentation.", "title":"S-SYNTH: Knowledge-Based, Synthetic Generation of Skin Images", "authors":[ "Kim, Andrea", "Saharkhiz, Niloufar", "Sizikova, Elena", "Lago, Miguel", "Sahiner, Berkman", "Delfino, Jana", "Badano, Aldo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/DIDSR\/ssynth-release" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":295 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2219_paper.pdf", "bibtext":"@InProceedings{ Yun_RegionSpecific_MICCAI2024,\n author = { Yung, Ka-Wai and Sivaraj, Jayaram and Stoyanov, Danail and Loukogeorgakis, Stavros and Mazomenos, Evangelos B. },\n title = { { Region-Specific Retrieval Augmentation for Longitudinal Visual Question Answering: A Mix-and-Match Paradigm } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Visual Question Answering (VQA) has advanced in recent years, inspiring adaptations to radiology for medical diagnosis. Longitudinal VQA, which requires an understanding of changes in images over time, can further support patient monitoring and treatment decision making. This work introduces RegioMix, a retrieval augmented paradigm for longitudinal VQA, formulating a novel approach that generates retrieval objects through a mix-and-match technique, utilizing different regions from various retrieved images. Furthermore, this process generates a pseudo-difference description based on the retrieved pair, by leveraging available reports form each retrieved region. To align such statements to both the posted question and input image pair, we introduce a Dual Alignment module. Experiments on the MIMIC-Diff-VQA X-ray dataset demonstrate our method\u2019s superiority, outperforming the state-of-the-art by 77.7 in CIDEr score and 8.3% in BLEU-4, while relying solely on the training dataset for retrieval, showcasing the effectiveness of our approach. Code is available at https:\/\/github.com\/KawaiYung\/RegioMix", "title":"Region-Specific Retrieval Augmentation for Longitudinal Visual Question Answering: A Mix-and-Match Paradigm", "authors":[ "Yung, Ka-Wai", "Sivaraj, Jayaram", "Stoyanov, Danail", "Loukogeorgakis, Stavros", "Mazomenos, Evangelos B." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/KawaiYung\/RegioMix" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":296 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0050_paper.pdf", "bibtext":"@InProceedings{ Xu_FMABS_MICCAI2024,\n author = { Xu, Zhe and Chen, Cheng and Lu, Donghuan and Sun, Jinghan and Wei, Dong and Zheng, Yefeng and Li, Quanzheng and Tong, Raymond Kai-yu },\n title = { { FM-ABS: Promptable Foundation Model Drives Active Barely Supervised Learning for 3D Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised learning (SSL) has significantly advanced 3D medical image segmentation by effectively reducing the need for laborious dense labeling from radiologists. Traditionally focused on \\textit{model-centric} advancements, we anticipate that the SSL landscape will shift due to the emergence of open-source generalist foundation models, e.g., Segment Anything Model (SAM). These generalists have shown remarkable zero-shot segmentation capabilities with manual prompts, allowing a promising \\textit{data-centric} perspective for future SSL, particularly in pseudo and expert labeling strategies for enhancing the data pool. To this end, we propose the Foundation Model-driven Active Barely Supervised (FM-ABS) learning paradigm for developing customized 3D specialist segmentation models with shoestring annotation budgets, i.e., merely labeling three slices per scan. Specifically, building upon the basic mean-teacher framework, FM-ABS accounts for the intrinsic characteristics of 3D imaging and modernizes the SSL paradigm with two key data-centric designs: (i) specialist-generalist collaboration where the in-training specialist model delivers class-specific prompts to interact with the frozen class-agnostic generalist model across multiple views to acquire noisy-yet-effective pseudo labels, and (ii) expert-model collaboration that advocates active cross-labeling with notably low annotation efforts to progressively provide the specialist model with informative and efficient supervision in a human-in-the-loop manner, which benefits the automatic object-specific prompt generation in turn. 
Extensive experiments on two benchmark datasets show the promising results of our approach over recent SSL methods under extremely limited (barely) labeling budgets.", "title":"FM-ABS: Promptable Foundation Model Drives Active Barely Supervised Learning for 3D Medical Image Segmentation", "authors":[ "Xu, Zhe", "Chen, Cheng", "Lu, Donghuan", "Sun, Jinghan", "Wei, Dong", "Zheng, Yefeng", "Li, Quanzheng", "Tong, Raymond Kai-yu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":297 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0320_paper.pdf", "bibtext":"@InProceedings{ Mia_FMOSD_MICCAI2024,\n author = { Miao, Juzheng and Chen, Cheng and Zhang, Keli and Chuai, Jie and Li, Quanzheng and Heng, Pheng-Ann },\n title = { { FM-OSD: Foundation Model-Enabled One-Shot Detection of Anatomical Landmarks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"One-shot detection of anatomical landmarks is gaining significant attention for its efficiency in using minimal labeled data to produce promising results. However, the success of current methods heavily relies on the employment of extensive unlabeled data to pre-train an effective feature extractor, which limits their applicability in scenarios where a substantial amount of unlabeled data is unavailable. In this paper, we propose the first foundation model-enabled one-shot landmark detection (FM-OSD) framework for accurate landmark detection in medical images by utilizing solely a single template image without any additional unlabeled data. Specifically, we use the frozen image encoder of visual foundation models as the feature extractor, and introduce dual-branch global and local feature decoders to increase the resolution of extracted features in a coarse to fine manner. The introduced feature decoders are efficiently trained with a distance-aware similarity learning loss to incorporate domain knowledge from the single template image. Moreover, a novel bidirectional matching strategy is developed to improve both robustness and accuracy of landmark detection in the case of scattered similarity map obtained by foundation models. We validate our method on two public anatomical landmark detection datasets. 
By using solely a single template image, our method demonstrates significant superiority over strong state-of-the-art one-shot landmark detection methods.", "title":"FM-OSD: Foundation Model-Enabled One-Shot Detection of Anatomical Landmarks", "authors":[ "Miao, Juzheng", "Chen, Cheng", "Zhang, Keli", "Chuai, Jie", "Li, Quanzheng", "Heng, Pheng-Ann" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/JuzhengMiao\/FM-OSD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":298 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3688_paper.pdf", "bibtext":"@InProceedings{ Wan_Adaptive_MICCAI2024,\n author = { Wang, Xinkai and Shi, Yonggang },\n title = { { Adaptive Subtype and Stage Inference for Alzheimer\u2019s Disease } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Subtype and Stage Inference (SuStaIn) is a useful Event-based Model for capturing both the temporal and the phenotypical patterns for any progressive disorders, which is essential for understanding the heterogeneous nature of such diseases. However, this model cannot capture subtypes with different progression rates with respect to predefined biomarkers with fixed events prior to inference. Therefore, we propose an adaptive algorithm for learning subtype-specific events while making subtype and stage inference. We use simulation to demonstrate the improvement with respect to various performance metrics. Finally, we provide snapshots of different levels of biomarker abnormality within different subtypes on Alzheimer\u2019s Disease (AD) data to demonstrate the effectiveness of our algorithm.", "title":"Adaptive Subtype and Stage Inference for Alzheimer\u2019s Disease", "authors":[ "Wang, Xinkai", "Shi, Yonggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/x5wang\/Adaptive-Subtype-and-Stage-Inference" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":299 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0250_paper.pdf", "bibtext":"@InProceedings{ Lin_Learning_MICCAI2024,\n author = { Lin, Yiqun and Wang, Hualiang and Chen, Jixiang and Li, Xiaomeng },\n title = { { Learning 3D Gaussians for Extremely Sparse-View Cone-Beam CT Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cone-Beam Computed Tomography (CBCT) is an indispensable technique in medical imaging, yet the associated radiation exposure raises concerns in clinical practice. To mitigate these risks, sparse-view reconstruction has emerged as an essential research direction, aiming to reduce the radiation dose by utilizing fewer projections for CT reconstruction. 
Although implicit neural representations have been introduced for sparse-view CBCT reconstruction, existing methods primarily focus on local 2D features queried from sparse projections, which is insufficient to process the more complicated anatomical structures, such as the chest. To this end, we propose a novel reconstruction framework, namely DIF-Gaussian, which leverages 3D Gaussians to represent the feature distribution in the 3D space, offering additional 3D spatial information to facilitate the estimation of attenuation coefficients. Furthermore, we incorporate test-time optimization during inference to further improve the generalization capability of the model. We evaluate DIF-Gaussian on two public datasets, showing significantly superior reconstruction performance than previous state-of-the-art methods.", "title":"Learning 3D Gaussians for Extremely Sparse-View Cone-Beam CT Reconstruction", "authors":[ "Lin, Yiqun", "Wang, Hualiang", "Chen, Jixiang", "Li, Xiaomeng" ], "id":"Conference", "arxiv_id":"2407.01090", "GitHub":[ "https:\/\/github.com\/xmed-lab\/DIF-Gaussian" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":300 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1221_paper.pdf", "bibtext":"@InProceedings{ Pan_PGMLIF_MICCAI2024,\n author = { Pan, Xipeng and An, Yajun and Lan, Rushi and Liu, Zhenbing and Liu, Zaiyi and Lu, Cheng and Yang, Huihua },\n title = { { PG-MLIF: Multimodal Low-rank Interaction Fusion Framework Integrating Pathological Images and Genomic Data for Cancer Prognosis Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Precise prognostication can assist physicians in developing personalized treatment and follow-up plans, which help enhance the overall survival rates. Recently, an enormous amount of research relies on unimodal data for survival prediction, not fully capitalizing on the complementary information available. To address this deficiency, we propose a Multimodal Low-rank Interaction Fusion Framework Integrating Pathological images and Genomic data (PG-MLIF) for survival prediction. In this framework, we leverage the gating-based modality attention mechanism (MAM) for effective filtering at the feature level and propose the optimal weight concatenation (OWC) strategy to maximize the integration of information from pathological images, genomic data, and fused features at the model level. The model introduces a parallel decomposition strategy called low-rank multimodal fusion (LMF) for the first time, which simplifies the complexity and facilitates model contribution-based fusion, addressing the challenge of incomplete and inefficient multimodal fusion. Extensive experiments on the public dataset of GBMLGG and KIRC demonstrate that our PG-MLIF outperforms state-of-the-art survival prediction methods. Additionally, we significantly stratify patients based on the hazard ratios obtained from training the two types of datasets, and the visualization results were generally consistent with the true grade classification. 
The code is available at: https:\/\/github.com\/panxipeng\/PG-MLIF.", "title":"PG-MLIF: Multimodal Low-rank Interaction Fusion Framework Integrating Pathological Images and Genomic Data for Cancer Prognosis Prediction", "authors":[ "Pan, Xipeng", "An, Yajun", "Lan, Rushi", "Liu, Zhenbing", "Liu, Zaiyi", "Lu, Cheng", "Yang, Huihua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/panxipeng\/PG-MLIF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":301 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0942_paper.pdf", "bibtext":"@InProceedings{ He_Pair_MICCAI2024,\n author = { He, Jianjun and Cai, Chenyu and Li, Qiong and Ma, Andy J },\n title = { { Pair Shuffle Consistency for Semi-supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised medical image segmentation is a practical but challenging problem, in which only limited pixel-wise annotations are available for training. While most existing methods train a segmentation model by using the labeled and unlabeled data separately, the learning paradigm solely based on unlabeled data is less reliable due to the possible incorrectness of pseudo labels. In this paper, we propose a novel method namely pair shuffle consistency (PSC) learning for semi-supervised medical image segmentation. The pair shuffle operation splits an image pair into patches, and then randomly shuffle them to obtain mixed images. With the shuffled images for training, local information is better interpreted for pixel-wise predictions. The consistency learning of labeled-unlabeled image pairs becomes more reliable, since predictions of the unlabeled data can be learned from those of the labeled data with ground truth. To enhance the model robustness, the consistency constraint on unlabeled-unlabeled image pairs serves as a regularization term, thereby further improving the segmentation performance. 
Experiments on three benchmarks demonstrate that our method outperforms the state of the art for semi-supervised medical image segmentation.", "title":"Pair Shuffle Consistency for Semi-supervised Medical Image Segmentation", "authors":[ "He, Jianjun", "Cai, Chenyu", "Li, Qiong", "Ma, Andy J" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":302 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1260_paper.pdf", "bibtext":"@InProceedings{ Xu_MiHATPA_MICCAI2024,\n author = { Xu, Zhufeng and Qin, Jiaxin and Li, Chenhao and Bu, Dechao and Zhao, Yi },\n title = { { MiHATP:A Multi-Hybrid Attention Super-Resolution Network for Pathological Image Based on Transformation Pool Contrastive Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Digital pathology slides can serve medical practitioners or aid in computer-assisted diagnosis and treatment. Collection personnel typically employ hyperspectral microscopes to scan pathology slides into Whole Slide Images (WSI) with pixel counts reaching the million level. However, this process incurs significant acquisition time and data storage costs. Utilizing super-resolution imaging techniques to enhance low-resolution pathological images enables downstream analysis of pathological tissue slice data under low-resource and cost-effective medical conditions. Nevertheless, existing super-resolution methods cannot integrate attention information containing variable receptive fields and effective means to handle distortions and artifacts in the output data. This leads to differences between super-resolution images and authentic images depicting cell contours and tissue morphology. We propose a method named MiHATP: A Multi(Mi)-Hybrid(H) Attention(A) Network Based on Transformation(T) Pool(P) Contrastive Learning to address these challenges. By constructing contrastive losses through reversible image transformation and irreversible low-quality image transformation, MiHATP effectively reduces distortion in super-resolution pathological images. Additionally, within MiHATP, we design a Multi-Hybrid Attention structure to ensure strong modeling capability for long-distance and short-distance information, thereby ensuring that the super-resolution network can obtain richer image information. Experimental results demonstrate superior performance compared to existing methods. 
Furthermore, we conduct tests on the output images of the super-resolution network for downstream cell segmentation and phenotypes tasks, achieving performance similar to that of original high-resolution images.", "title":"MiHATP:A Multi-Hybrid Attention Super-Resolution Network for Pathological Image Based on Transformation Pool Contrastive Learning", "authors":[ "Xu, Zhufeng", "Qin, Jiaxin", "Li, Chenhao", "Bu, Dechao", "Zhao, Yi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/rabberk\/MiHATP.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":303 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2292_paper.pdf", "bibtext":"@InProceedings{ Dwe_Estimating_MICCAI2024,\n author = { Dwedari, Mohammed Munzer and Consagra, William and M\u00fcller, Philip and Turgut, O\u0308zgu\u0308n and Rueckert, Daniel and Rathi, Yogesh },\n title = { { Estimating Neural Orientation Distribution Fields on High Resolution Diffusion MRI Scans } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"The Orientation Distribution Function (ODF) characterizes key brain microstructural properties and plays an important role in understanding brain structural connectivity. Recent works introduced Implicit Neural Representation (INR) based approaches to form a spatially aware continuous estimate of the ODF field and demonstrated promising results in key tasks of interest when compared to conventional discrete approaches.\nHowever, traditional INR methods face difficulties when scaling to large-scale images, such as modern ultra-high-resolution MRI scans, posing challenges in learning fine structures as well as inefficiencies in training and inference speed. In this work, we propose HashEnc, a grid-hash-encoding-based estimation of the ODF field and demonstrate its effectiveness in retaining structural and textural features. 
We show that HashEnc achieves a 10% enhancement in image quality while requiring 3x less computational resources than current methods.", "title":"Estimating Neural Orientation Distribution Fields on High Resolution Diffusion MRI Scans", "authors":[ "Dwedari, Mohammed Munzer", "Consagra, William", "M\u00fcller, Philip", "Turgut, O\u0308zgu\u0308n", "Rueckert, Daniel", "Rathi, Yogesh" ], "id":"Conference", "arxiv_id":"2409.09387", "GitHub":[ "https:\/\/github.com\/MunzerDw\/NODF-HashEnc" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":304 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1398_paper.pdf", "bibtext":"@InProceedings{ Lei_Weaksupervised_MICCAI2024,\n author = { Lei, Haijun and Tong, Guanjiie and Su, Huaqiang and Lei, Baiying },\n title = { { Weak-supervised Attention Fusion Network for Carotid Artery Vessel Wall Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"The automatic and accurate segmentation of the carotid artery vessel wall can assist doctors in clinical diagnosis. Medical images often have complex and blurry features, which makes manual data annotation very difficult and time-consuming. 3D CNN can utilize three-dimensional spatial information to more accurately identify diseased tissues and organ structures, but its segmentation performance is limited due to the lack of global contextual information correlation. This paper proposes a network based on CNN and Transformer to segment the carotid artery vessel wall. By combining the effectiveness of CNN in dealing with 3D image segmentation problems and the global attention mechanism of Transformer, it is possible to better capture and process the features of this information. By designing Joint Attention Structure Block (JAS), semantic information in skip connections can be enhanced. The feature fusion block (FF) is used to associate input information with each layer of feature maps, enhancing the detailed information of the feature maps. 
The effectiveness of this method has been verified through a large number of comparative experiments.", "title":"Weak-supervised Attention Fusion Network for Carotid Artery Vessel Wall Segmentation", "authors":[ "Lei, Haijun", "Tong, Guanjiie", "Su, Huaqiang", "Lei, Baiying" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":305 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0411_paper.pdf", "bibtext":"@InProceedings{ Yan_Generating_MICCAI2024,\n author = { Yang, Jiancheng and Sedykh, Ekaterina and Adhinarta, Jason Ken and Le, Hieu and Fua, Pascal },\n title = { { Generating Anatomically Accurate Heart Structures via Neural Implicit Fields } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Implicit functions have significantly advanced shape modeling in diverse fields. Yet, their application within medical imaging often overlooks the intricate interrelations among various anatomical structures, a consideration crucial for accurately modeling complex multi-part structures like the heart. This study presents ImHeart, a latent variable model specifically designed to model complex heart structures. Leveraging the power of learnable templates, ImHeart adeptly captures the nuanced relationships between multiple heart components using a unified deformation field and introduces an implicit registration technique to manage the pose variability in medical data. Built on the WHS3D dataset of 140 refined whole-heart structures, ImHeart delivers superior reconstruction accuracy and anatomical fidelity. Moreover, we demonstrate that ImHeart can significantly improve heart segmentation from multi-center MRI scans through a retraining pipeline, adeptly navigating the domain gaps inherent to such data.", "title":"Generating Anatomically Accurate Heart Structures via Neural Implicit Fields", "authors":[ "Yang, Jiancheng", "Sedykh, Ekaterina", "Adhinarta, Jason Ken", "Le, Hieu", "Fua, Pascal" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":306 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4128_paper.pdf", "bibtext":"@InProceedings{ Li_ASPS_MICCAI2024,\n author = { Li, Huiqian and Zhang, Dingwen and Yao, Jieru and Han, Longfei and Li, Zhongyu and Han, Junwei },\n title = { { ASPS: Augmented Segment Anything Model for Polyp Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Polyp segmentation plays a pivotal role in colorectal cancer diagnosis. Recently, the emergence of the Segment Anything Model (SAM) has introduced unprecedented potential for polyp segmentation, leveraging its powerful pre-training capability on large-scale datasets. 
However, due to the domain gap between natural and endoscopy images, SAM encounters two limitations in achieving effective performance in polyp segmentation. Firstly, its Transformer-based structure prioritizes global and low-frequency information, potentially overlooking local details, and introducing bias into the learned features. Secondly, when applied to endoscopy images, its poor out-of-distribution (OOD) performance results in substandard predictions and biased confidence output. To tackle these challenges, we introduce a novel approach named Augmented SAM for Polyp Segmentation (ASPS), equipped with two modules: Cross-branch Feature Augmentation (CFA) and Uncertainty-guided Prediction Regularization (UPR). CFA integrates a trainable CNN encoder branch with a frozen ViT encoder, enabling the integration of domain-specific knowledge while enhancing local features and high-frequency details. Moreover, UPR ingeniously leverages SAM\u2019s IoU score to mitigate uncertainty during the training procedure, thereby improving OOD performance and domain generalization. Extensive experimental results demonstrate the effectiveness and utility of the proposed method in improving SAM\u2019s performance in polyp segmentation. Our code is available at https:\/\/github.com\/HuiqianLi\/ASPS.", "title":"ASPS: Augmented Segment Anything Model for Polyp Segmentation", "authors":[ "Li, Huiqian", "Zhang, Dingwen", "Yao, Jieru", "Han, Longfei", "Li, Zhongyu", "Han, Junwei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/HuiqianLi\/ASPS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":307 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1759_paper.pdf", "bibtext":"@InProceedings{ Liu_Learning_MICCAI2024,\n author = { Liu, Hong and Wei, Dong and Lu, Donghuan and Sun, Jinghan and Zheng, Hao and Zheng, Yefeng and Wang, Liansheng },\n title = { { Learning to Segment Multiple Organs from Multimodal Partially Labeled Datasets } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Learning to segment multiple organs from partially labeled medical image datasets can significantly reduce the burden of manual annotation. However, due to the large domain gap, learning from partially labeled datasets of different modalities has not been well addressed in the literature. In addition, the anatomic prior knowledge of various organs is spread in multiple datasets and needs to be more effectively utilized. This work proposes a novel framework for learning to segment multiple organs from multimodal partially labeled datasets (i.e., CT and MRI). Specifically, our framework constructs a cross-modal a priori atlas from training data, which implicitly contains prior knowledge of organ locations, shapes, and sizes. 
Based on the atlas, three novel modules are proposed to utilize the prior knowledge to address the joint challenges of unlabeled organs and inter-modal domain gaps: 1) to better utilize unlabeled organs for training, we propose an atlas-guided pseudo-label refiner network (APRN) to improve the quality of pseudo-labels; 2) we propose an atlas-conditioned modality alignment network (AMAN) for cross-modal alignment in the label space via adversarial training, forcing cross-modal segmentations of organs labeled in a different modality to match the atlas; and 3) to further align organ-specific semantics in the latent space, we introduce modal-invariant class prototype anchoring modules (MICPAMs) supervised by the atlas-guided refined pseudo-labels, encouraging domain-invariant features for each organ. Extensive experiments on both multimodal and monomodal partially labeled datasets demonstrate the superior performance of our framework to existing state-of-the-art methods and the efficacy of its components.", "title":"Learning to Segment Multiple Organs from Multimodal Partially Labeled Datasets", "authors":[ "Liu, Hong", "Wei, Dong", "Lu, Donghuan", "Sun, Jinghan", "Zheng, Hao", "Zheng, Yefeng", "Wang, Liansheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ccarliu\/multimodal-PL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":308 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1953_paper.pdf", "bibtext":"@InProceedings{ Yan_ANew_MICCAI2024,\n author = { Yan, Yunlu and Zhu, Lei and Li, Yuexiang and Xu, Xinxing and Goh, Rick Siow Mong and Liu, Yong and Khan, Salman and Feng, Chun-Mei },\n title = { { A New Perspective to Boost Performance Fairness For Medical Federated Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Improving the fairness of federated learning (FL) benefits healthy and sustainable collaboration, especially for medical applications. However, existing fair FL methods ignore the specific characteristics of medical FL applications, i.e., domain shift among the datasets from different hospitals. In this work, we propose Fed-LWR to improve performance fairness from the perspective of feature shift, a key issue influencing the performance of medical FL systems caused by domain shift. Specifically, we dynamically perceive the bias of the global model across all hospitals by estimating the layer-wise difference in feature representations between local and global models. To minimize global divergence, we assign higher weights to hospitals with larger differences. The estimated client weights help us to re-aggregate the local models per layer to obtain a fairer global model. We evaluate our method on two widely used federated medical image segmentation benchmarks. 
The results demonstrate that our method achieves better and fairer performance compared with several state-of-the-art fair FL methods.", "title":"A New Perspective to Boost Performance Fairness For Medical Federated Learning", "authors":[ "Yan, Yunlu", "Zhu, Lei", "Li, Yuexiang", "Xu, Xinxing", "Goh, Rick Siow Mong", "Liu, Yong", "Khan, Salman", "Feng, Chun-Mei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/IAMJackYan\/Fed-LWR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":309 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0603_paper.pdf", "bibtext":"@InProceedings{ Fis_SubgroupSpecific_MICCAI2024,\n author = { Fischer, Paul and Willms, Hannah and Schneider, Moritz and Thorwarth, Daniela and Muehlebach, Michael and Baumgartner, Christian F. },\n title = { { Subgroup-Specific Risk-Controlled Dose Estimation in Radiotherapy } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cancer remains a leading cause of death, highlighting the importance of effective radiotherapy (RT). Magnetic resonance-guided linear accelerators (MR-Linacs) enable imaging during RT, allowing for inter-fraction, and perhaps even intra-fraction, adjustments of treatment plans. However, achieving this requires fast and accurate dose calculations. While Monte Carlo simulations offer accuracy, they are computationally intensive. Deep learning frameworks show promise, yet lack uncertainty quantification crucial for high-risk applications like RT. Risk-controlling prediction sets (RCPS) offer model-agnostic uncertainty quantification with mathematical guarantees. However, we show that naive application of RCPS may lead to only certain subgroups such as the image background being risk-controlled. In this work, we extend RCPS to provide prediction intervals with coverage guarantees for multiple subgroups with unknown subgroup membership at test time. We evaluate our algorithm on real clinical planning volumes from five different anatomical regions and show that our novel subgroup RCPS (SG-RCPS) algorithm leads to prediction intervals that jointly control the risk for multiple subgroups. In particular, our method controls the risk of the crucial voxels along the radiation beam significantly better than conventional RCPS.", "title":"Subgroup-Specific Risk-Controlled Dose Estimation in Radiotherapy", "authors":[ "Fischer, Paul", "Willms, Hannah", "Schneider, Moritz", "Thorwarth, Daniela", "Muehlebach, Michael", "Baumgartner, Christian F." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/paulkogni\/SG-RCPS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":310 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3422_paper.pdf", "bibtext":"@InProceedings{ Deu_Neural_MICCAI2024,\n author = { Deutges, Michael and Sadafi, Ario and Navab, Nassir and Marr, Carsten },\n title = { { Neural Cellular Automata for Lightweight, Robust and Explainable Classification of White Blood Cell Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diagnosis of hematological malignancies depends on accurate identification of white blood cells in peripheral blood smears. Deep learning techniques are emerging as a viable solution to scale and optimize this process by automatic cell classification. However, these techniques face several challenges such as limited generalizability, sensitivity to domain shifts, and lack of explainability. Here, we introduce a novel approach for white blood cell classification based on neural cellular automata (NCA). We test our approach on three datasets of white blood cell images and show that we achieve competitive performance compared to conventional methods. Our NCA-based method is significantly smaller in terms of parameters and exhibits robustness to domain shifts. Furthermore, the architecture is inherently explainable, providing insights into the decision process for each classification, which helps to understand and validate model predictions. 
Our results demonstrate that NCA can be used for image classification, and that they address key challenges of conventional methods, indicating a high potential for applicability in clinical practice.", "title":"Neural Cellular Automata for Lightweight, Robust and Explainable Classification of White Blood Cell Images", "authors":[ "Deutges, Michael", "Sadafi, Ario", "Navab, Nassir", "Marr, Carsten" ], "id":"Conference", "arxiv_id":"2404.05584", "GitHub":[ "https:\/\/github.com\/marrlab\/WBC-NCA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":311 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0979_paper.pdf", "bibtext":"@InProceedings{ Zha_M2Fusion_MICCAI2024,\n author = { Zhang, Song and Du, Siyao and Sun, Caixia and Li, Bao and Shao, Lizhi and Zhang, Lina and Wang, Kun and Liu, Zhenyu and Tian, Jie },\n title = { { M2Fusion: Multi-time Multimodal Fusion for Prediction of Pathological Complete Response in Breast Cancer } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate identification of patients who achieve pathological complete response (pCR) after neoadjuvant chemotherapy (NAC) is critical before surgery for guiding customized treatment regimens and assessing prognosis in breast cancer. However, current methods for predicting pCR primarily rely on single modality data or single time-point images, which fail to capture tumor changes and comprehensively represent tumor heterogeneity at both macro and micro levels. Additionally, complementary information between modalities is not fully interacted. In this paper, we present M2Fusion, pioneering the fusion of multi-time multimodal data for treatment response prediction, with two key components: the multi-time magnetic resonance imagings (MRIs) contrastive learning loss that learns representations reflecting NAC-induced tumor changes; the orthogonal multimodal fusion module that integrates orthogonal information from MRIs and whole slide images (WSIs). To evaluate the proposed M2Fusion, we collect pre-treatment MRI, post-treatment MRI, and WSIs of biopsy from patients with breast cancer at two different collaborating hospitals, each with the pCR assessed by the standard pathological procedure. Experimental results quantitatively reveal that the proposed M2Fusion improves treatment response prediction and outperforms other multimodal fusion methods and single-modality approaches. Validation on external test sets further demonstrates the generalization and validity of the model. 
Our code is available at https:\/\/github.com\/SongZHS\/M2Fusion.", "title":"M2Fusion: Multi-time Multimodal Fusion for Prediction of Pathological Complete Response in Breast Cancer", "authors":[ "Zhang, Song", "Du, Siyao", "Sun, Caixia", "Li, Bao", "Shao, Lizhi", "Zhang, Lina", "Wang, Kun", "Liu, Zhenyu", "Tian, Jie" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/SongZHS\/M2Fusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":312 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2801_paper.pdf", "bibtext":"@InProceedings{ Zer_AMONuSeg_MICCAI2024,\n author = { Zerouaoui, Hasnae and Oderinde, Gbenga Peter and Lefdali, Rida and Echihabi, Karima and Akpulu, Stephen Peter and Agbon, Nosereme Abel and Musa, Abraham Sunday and Yeganeh, Yousef and Farshad, Azade and Navab, Nassir },\n title = { { AMONuSeg: A Histological Dataset for African Multi-Organ Nuclei Semantic Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Nuclei semantic segmentation is a key component for advancing machine learning and deep learning applications in digital pathology. However, most existing segmentation models are trained and tested on high-quality data acquired with expensive equipment, such as whole slide scanners, which are not accessible to most pathologists in developing countries. These pathologists rely on low-resource data acquired with low-precision microscopes, smartphones, or digital cameras, which have different characteristics and challenges than high-resource data. Therefore, there is a gap between the state-of-the-art segmentation models and the real-world needs of low-resource settings. This work aims to bridge this gap by presenting the first fully annotated African multi-organ dataset for histopathology nuclei semantic segmentation acquired with a low-precision microscope. We also evaluate state-of-the-art segmentation models, including spectral feature extraction encoder and vision transformer-based models, and stain normalization techniques for color normalization of Hematoxylin and Eosin-stained histopathology slides. 
Our results provide important insights for future research on nuclei histopathology segmentation with low-resource data.", "title":"AMONuSeg: A Histological Dataset for African Multi-Organ Nuclei Semantic Segmentation", "authors":[ "Zerouaoui, Hasnae", "Oderinde, Gbenga Peter", "Lefdali, Rida", "Echihabi, Karima", "Akpulu, Stephen Peter", "Agbon, Nosereme Abel", "Musa, Abraham Sunday", "Yeganeh, Yousef", "Farshad, Azade", "Navab, Nassir" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zerouaoui\/AMONUSEG" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":313 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2599_paper.pdf", "bibtext":"@InProceedings{ Guo_Common_MICCAI2024,\n author = { Guo, Yunpeng and Zeng, Xinyi and Zeng, Pinxian and Fei, Yuchen and Wen, Lu and Zhou, Jiliu and Wang, Yan },\n title = { { Common Vision-Language Attention for Text-Guided Medical Image Segmentation of Pneumonia } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Pneumonia, recognized as a severe respiratory disease, has attracted widespread attention in the wake of the COVID-19 pandemic, underscoring the critical need for precise diagnosis and effective treatment. Despite significant advancements in the automatic segmentation of lung infection areas using medical imaging, most current approaches rely solely on a large quantity of high-quality images for training, which is not practical in clinical settings. Moreover, the unimodal attention mechanisms adopted in conventional vision-language models encounter challenges in effectively preserving and integrating information across modalities. To alleviate these problems, we introduce Text-Guided Common Attention Model (TGCAM), a novel method for text-guided medical image segmentation of pneumonia. Text-Guided means inputting both an image and its corresponding text into the model simultaneously to obtain segmentation results. Specifically, TGCAM encompasses the introduction of Common Attention, a multimodal interaction paradigm between vision and language, applied during the decoding phase. In addition, we present an Iterative Text Enhancement Module that facilitates the progressive refinement of text, thereby augmenting multi-modal interactions. 
Experiments on public CT and X-ray datasets demonstrate that our method outperforms state-of-the-art methods both qualitatively and quantitatively.", "title":"Common Vision-Language Attention for Text-Guided Medical Image Segmentation of Pneumonia", "authors":[ "Guo, Yunpeng", "Zeng, Xinyi", "Zeng, Pinxian", "Fei, Yuchen", "Wen, Lu", "Zhou, Jiliu", "Wang, Yan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/G-peppa\/TGCAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":314 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0796_paper.pdf", "bibtext":"@InProceedings{ Guo_HistGen_MICCAI2024,\n author = { Guo, Zhengrui and Ma, Jiabo and Xu, Yingxue and Wang, Yihui and Wang, Liansheng and Chen, Hao },\n title = { { HistGen: Histopathology Report Generation via Local-Global Feature Encoding and Cross-modal Context Interaction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Histopathology serves as the gold standard in cancer diagnosis, with clinical reports being vital in interpreting and understanding this process, guiding cancer treatment and patient care. The automation of histopathology report generation with deep learning stands to significantly enhance clinical efficiency and lessen the labor-intensive, time-consuming burden on pathologists in report writing. In pursuit of this advancement, we introduce HistGen, a multiple instance learning-empowered framework for histopathology report generation together with the first benchmark dataset for evaluation. Inspired by diagnostic and report-writing workflows, HistGen features two delicately designed modules, aiming to boost report generation by aligning whole slide images (WSIs) and diagnostic reports at both local and global granularities. To achieve this, a local-global hierarchical encoder is developed for efficient visual feature aggregation from a region-to-slide perspective. Meanwhile, a cross-modal context module is proposed to explicitly facilitate alignment and interaction between distinct modalities, effectively bridging the gap between the extensive visual sequences of WSIs and corresponding highly summarized reports. Experimental results on WSI report generation show the proposed model outperforms state-of-the-art (SOTA) models by a large margin. Moreover, the results of fine-tuning our model on cancer subtyping and survival analysis tasks further demonstrate superior performance compared to SOTA methods, showcasing strong transfer learning capability. 
Dataset and code are available here.", "title":"HistGen: Histopathology Report Generation via Local-Global Feature Encoding and Cross-modal Context Interaction", "authors":[ "Guo, Zhengrui", "Ma, Jiabo", "Xu, Yingxue", "Wang, Yihui", "Wang, Liansheng", "Chen, Hao" ], "id":"Conference", "arxiv_id":"2403.05396", "GitHub":[ "https:\/\/github.com\/dddavid4real\/HistGen" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":315 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2830_paper.pdf", "bibtext":"@InProceedings{ Zha_Incorporating_MICCAI2024,\n author = { Zhang, Tiantian and Lin, Manxi and Guo, Hongda and Zhang, Xiaofan and Chiu, Ka Fung Peter and Feragen, Aasa and Dou, Qi },\n title = { { Incorporating Clinical Guidelines through Adapting Multi-modal Large Language Model for Prostate Cancer PI-RADS Scoring } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"The Prostate Imaging Reporting and Data System (PI-RADS) is pivotal in the diagnosis of clinically significant prostate cancer through MRI imaging. Current deep learning-based PI-RADS scoring methods often lack the incorporation of common PI-RADS clinical guideline (PICG) utilized by radiologists, potentially compromising scoring accuracy. \nThis paper introduces a novel approach that adapts a multi-modal large language model (MLLM) to incorporate PICG into PI-RADS scoring model without additional annotations and network parameters. We present a designed two-stage fine-tuning process aiming at adapting a MLLM originally trained on natural images to the MRI images while effectively integrating the PICG. Specifically, in the first stage, we develop a domain adapter layer tailored for processing 3D MRI inputs and instruct the MLLM to differentiate MRI sequences. In the second stage, we translate PICG for guiding instructions from the model to generate PICG-guided image features. Through such a feature distillation step, we align the scoring network\u2019s features with the PICG-guided image features, which enables the model to effectively incorporate the PICG information. We develop our model on a public dataset and evaluate it on an in-house dataset. \nExperimental results demonstrate that our approach effectively improves the performance of current scoring networks. 
Code is available at: https:\/\/github.com\/med-air\/PICG2scoring", "title":"Incorporating Clinical Guidelines through Adapting Multi-modal Large Language Model for Prostate Cancer PI-RADS Scoring", "authors":[ "Zhang, Tiantian", "Lin, Manxi", "Guo, Hongda", "Zhang, Xiaofan", "Chiu, Ka Fung Peter", "Feragen, Aasa", "Dou, Qi" ], "id":"Conference", "arxiv_id":"2405.08786", "GitHub":[ "https:\/\/github.com\/med-air\/PICG2scoring" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":316 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0872_paper.pdf", "bibtext":"@InProceedings{ Lim_Diffusionbased_MICCAI2024,\n author = { Liman, Michelle Espranita and Rueckert, Daniel and Fintelmann, Florian J. and M\u00fcller, Philip },\n title = { { Diffusion-based Generative Image Outpainting for Recovery of FOV-Truncated CT Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Field-of-view (FOV) recovery of truncated chest CT scans is crucial for accurate body composition analysis, which involves quantifying skeletal muscle and subcutaneous adipose tissue (SAT) on CT slices. This, in turn, enables disease prognostication. Here, we present a method for recovering truncated CT slices using generative image outpainting. We train a diffusion model and apply it to truncated CT slices generated by simulating a small FOV. Our model reliably recovers the truncated anatomy and outperforms the previous state-of-the-art despite being trained on 87% less data. Our code is available at https:\/\/github.com\/michelleespranita\/ct_palette.", "title":"Diffusion-based Generative Image Outpainting for Recovery of FOV-Truncated CT Images", "authors":[ "Liman, Michelle Espranita", "Rueckert, Daniel", "Fintelmann, Florian J.", "M\u00fcller, Philip" ], "id":"Conference", "arxiv_id":"2406.04769", "GitHub":[ "https:\/\/github.com\/michelleespranita\/ct_palette" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":317 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0621_paper.pdf", "bibtext":"@InProceedings{ Don_Multistage_MICCAI2024,\n author = { Dong, Haichuan and Zhou, Runjie and Yun, Boxiang and Zhou, Huihui and Zhang, Benyan and Li, Qingli and Wang, Yan },\n title = { { Multi-stage Multi-granularity Focus-tuned Learning Paradigm for Medical HSI Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Despite significant breakthrough in computational pathology that Medical Hyperspectral Imaging (MHSI) has brought, the asymmetric information in spectral and spatial dimensions pose a primary challenge. In this study, we propose a multi-stage multi-granularity Focus-tuned Learning paradigm for Medical HSI Segmentation. 
To learn subtle spectral differences while equalizing the spatiospectral feature learning, we design a quadruplet learning pre-training and focus-tuned fine-tuning stages for capturing both disease-level and image-level subtle spectral differences while integrating spatially and spectrally dominant features. We propose an intensifying and weakening strategy throughout all stages. Our method significantly outperforms all competitors in MHSI segmentation, with over 3.5% improvement in DSC. Ablation study further shows our method learns compact spatiospectral features while capturing various levels of spectral differences. Code will be released at https:\/\/github.com\/DHC233\/FL.", "title":"Multi-stage Multi-granularity Focus-tuned Learning Paradigm for Medical HSI Segmentation", "authors":[ "Dong, Haichuan", "Zhou, Runjie", "Yun, Boxiang", "Zhou, Huihui", "Zhang, Benyan", "Li, Qingli", "Wang, Yan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/DHC233\/FL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":318 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0415_paper.pdf", "bibtext":"@InProceedings{ P\u0142o_Swin_MICCAI2024,\n author = { P\u0142otka, Szymon and Chrabaszcz, Maciej and Biecek, Przemyslaw },\n title = { { Swin SMT: Global Sequential Modeling for Enhancing 3D Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advances in Vision Transformers (ViTs) have significantly enhanced medical image segmentation by facilitating the learning of global relationships. However, these methods face a notable challenge in capturing diverse local and global long-range sequential feature representations, particularly evident in whole-body CT (WBCT) scans. To overcome this limitation, we introduce Swin Soft Mixture Transformer (Swin SMT), a novel architecture based on Swin UNETR. This model incorporates a Soft Mixture-of-Experts (Soft MoE) to effectively handle complex and diverse long-range dependencies. The use of Soft MoE allows for scaling up model parameters maintaining a balance between computational complexity and segmentation performance in both training and inference modes. We evaluate Swin SMT on the publicly available TotalSegmentator-V2 dataset, which includes 117 major anatomical structures in WBCT images. Comprehensive experimental results demonstrate that Swin SMT outperforms several state-of-the-art methods in 3D anatomical structure segmentation, achieving an average Dice Similarity Coefficient of 85.09%. 
The code and pre-trained weights of Swin SMT are publicly available at https:\/\/github.com\/MI2DataLab\/SwinSMT.", "title":"Swin SMT: Global Sequential Modeling for Enhancing 3D Medical Image Segmentation", "authors":[ "P\u0142otka, Szymon", "Chrabaszcz, Maciej", "Biecek, Przemyslaw" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MI2DataLab\/SwinSMT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":319 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0006_paper.pdf", "bibtext":"@InProceedings{ Ma_Spatiotemporal_MICCAI2024,\n author = { Ma, Xinghua and Zou, Mingye and Fang, Xinyan and Liu, Yang and Luo, Gongning and Wang, Wei and Wang, Kuanquan and Qiu, Zhaowen and Gao, Xin and Li, Shuo },\n title = { { Spatio-temporal Contrast Network for Data-efficient Learning of Coronary Artery Disease in Coronary CT Angiography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Coronary artery disease (CAD) poses a significant challenge to cardiovascular patients worldwide, underscoring the crucial role of automated CAD diagnostic technology in clinical settings. Previous methods for diagnosing CAD using coronary artery CT angiography (CCTA) images have certain limitations in widespread replication and clinical application due to the high demand for annotated medical imaging data. In this work, we introduce the Spatio-temporal Contrast Network (SC-Net) for the first time, designed to tackle the challenges of data-efficient learning in CAD diagnosis based on CCTA. SC-Net utilizes data augmentation to facilitate clinical feature learning and leverages spatio-temporal prediction-contrast based on dual tasks to maximize the effectiveness of limited data, thus providing clinically reliable predictive results. Experimental findings from a dataset comprising 218 CCTA images from diverse patients demonstrate that SC-Net achieves outstanding performance in automated CAD diagnosis with a reduced number of training samples. The introduction of SC-Net presents a practical data-efficient learning strategy, thereby facilitating the implementation and application of automated CAD diagnosis across a broader spectrum of clinical scenarios. 
The source code is publicly available at the following link (https:\/\/github.com\/PerceptionComputingLab\/SC-Net).", "title":"Spatio-temporal Contrast Network for Data-efficient Learning of Coronary Artery Disease in Coronary CT Angiography", "authors":[ "Ma, Xinghua", "Zou, Mingye", "Fang, Xinyan", "Liu, Yang", "Luo, Gongning", "Wang, Wei", "Wang, Kuanquan", "Qiu, Zhaowen", "Gao, Xin", "Li, Shuo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":320 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2701_paper.pdf", "bibtext":"@InProceedings{ Jia_IarCAC_MICCAI2024,\n author = { Jiang, Weili and Li, Yiming and Yi, Zhang and Wang, Jianyong and Chen, Mao },\n title = { { IarCAC: Instance-aware Representation for Coronary Artery Calcification Segmentation in Cardiac CT angiography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Coronary Artery Calcification (CAC) is a robust indicator of coronary artery disease and a critical determinant of percutaneous coronary intervention outcomes. Our method is inspired by a clinical observation that CAC typically manifests as a sparse distribution of multiple instances. Existing methods focusing solely on spatial correlation overlook the sparse spatial distribution of semantic connections in CAC tasks. Motivated by this, we introduce a novel instance-aware representation method for CAC segmentation, termed IarCAC, which explicitly leverages the sparse connectivity pattern among instances to enhance the model\u2019s instance discrimination capability. The proposed IarCAC first develops an InstanceViT module, which assesses the connection strength between each pair of tokens, enabling the model to learn instance-specific attention patterns. Subsequently, an instance-aware guided module is introduced to learn sparse high-resolution representations over instance-dependent regions in the Fourier domain. To evaluate the effectiveness of the proposed method, we conducted experiments on two challenging CAC datasets and achieved state-of-the-art performance across all datasets. 
The code is available at https:\/\/github.com\/WeiliJiang\/IarCAC", "title":"IarCAC: Instance-aware Representation for Coronary Artery Calcification Segmentation in Cardiac CT angiography", "authors":[ "Jiang, Weili", "Li, Yiming", "Yi, Zhang", "Wang, Jianyong", "Chen, Mao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":321 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1899_paper.pdf", "bibtext":"@InProceedings{ Bai_NODER_MICCAI2024,\n author = { Bai, Hao and Hong, Yi },\n title = { { NODER: Image Sequence Regression Based on Neural Ordinary Differential Equations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Regression on medical image sequences can capture temporal image pattern changes and predict images at missing or future time points. However, existing geodesic regression methods limit their regression performance by a strong underlying assumption of linear dynamics, while diffusion-based methods have high computational costs and lack constraints to preserve image topology. In this paper, we propose an optimization-based new framework called NODER, which leverages neural ordinary differential equations to capture complex underlying dynamics and reduces its high computational cost of handling high-dimensional image volumes by introducing the latent space. We compare our NODER with two recent regression methods, and the experimental results on ADNI and ACDC datasets demonstrate that our method achieves the SOTA performance in 3D image regression. Our model needs only a couple of images in a sequence for prediction, which is practical, especially for clinical situations where extremely limited image time series are available for analysis.", "title":"NODER: Image Sequence Regression Based on Neural Ordinary Differential Equations", "authors":[ "Bai, Hao", "Hong, Yi" ], "id":"Conference", "arxiv_id":"2407.13241", "GitHub":[ "https:\/\/github.com\/ZedKing12138\/NODER-pytorch" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":322 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2993_paper.pdf", "bibtext":"@InProceedings{ Raj_Assessing_MICCAI2024,\n author = { Raj, Ankita and Swaika, Harsh and Varma, Deepankar and Arora, Chetan },\n title = { { Assessing Risk of Stealing Proprietary Models for Medical Imaging Tasks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"The success of deep learning in medical imaging applications has led several companies to deploy proprietary models in diagnostic workflows, offering monetized services. 
Even though model weights are hidden to protect the intellectual property of the service provider, these models are exposed to model stealing (MS) attacks, where adversaries can clone the model\u2019s functionality by querying it with a proxy dataset and training a thief model on the acquired predictions. While extensively studied on general vision tasks, the susceptibility of medical imaging models to MS attacks remains inadequately explored. This paper investigates the vulnerability of black-box medical imaging models to MS attacks under realistic conditions where the adversary lacks knowledge of the victim model\u2019s training data and operates with limited query budgets. We demonstrate that adversaries can effectively execute MS attacks by using publicly available datasets. To further enhance MS capabilities with limited query budgets, we propose a two-step model stealing approach termed QueryWise. This method capitalizes on unlabeled data obtained from a proxy distribution to train the thief model without incurring additional queries. Evaluation on two medical imaging models for Gallbladder Cancer and COVID-19 classification substantiates the effectiveness of the proposed attack. The source code is available at https:\/\/github.com\/rajankita\/QueryWise.", "title":"Assessing Risk of Stealing Proprietary Models for Medical Imaging Tasks", "authors":[ "Raj, Ankita", "Swaika, Harsh", "Varma, Deepankar", "Arora, Chetan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/rajankita\/QueryWise" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":323 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3722_paper.pdf", "bibtext":"@InProceedings{ Got_PASSION_MICCAI2024,\n author = { Gottfrois, Philippe and Gr\u00f6ger, Fabian and Andriambololoniaina, Faly Herizo and Amruthalingam, Ludovic and Gonzalez-Jimenez, Alvaro and Hsu, Christophe and Kessy, Agnes and Lionetti, Simone and Mavura, Daudi and Ng\u2019ambi, Wingston and Ngongonda, Dingase Faith and Pouly, Marc and Rakotoarisaona, Mendrika Fifaliana and Rapelanoro Rabenja, Fahafahantsoa and Traore\u0301, Ibrahima and Navarini, Alexander A. },\n title = { { PASSION for Dermatology: Bridging the Diversity Gap with Pigmented Skin Images from Sub-Saharan Africa } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Africa faces a huge shortage of dermatologists, with less than one per million people. This is in stark contrast to the high demand for dermatologic care, with 80% of the paediatric population suffering from largely untreated skin conditions. The integration of AI into healthcare sparks significant hope for treatment accessibility, especially through the development of AI-supported teledermatology. Current AI models are predominantly trained on white-skinned patients and do not generalize well enough to pigmented patients. The PASSION project aims to address this issue by collecting images of skin diseases in Sub-Saharan countries with the aim of open-sourcing this data. 
This dataset is the first of its kind, consisting of 1,653 patients for a total of 4,901 images. The images are representative of telemedicine settings and encompass the most common paediatric conditions: eczema, fungals, scabies, and impetigo. We also provide a baseline machine learning model trained on the dataset and a detailed performance analysis for the subpopulations represented in the dataset. The project website can be found at https:\/\/passionderm.github.io\/.", "title":"PASSION for Dermatology: Bridging the Diversity Gap with Pigmented Skin Images from Sub-Saharan Africa", "authors":[ "Gottfrois, Philippe", "Gr\u00f6ger, Fabian", "Andriambololoniaina, Faly Herizo", "Amruthalingam, Ludovic", "Gonzalez-Jimenez, Alvaro", "Hsu, Christophe", "Kessy, Agnes", "Lionetti, Simone", "Mavura, Daudi", "Ng\u2019ambi, Wingston", "Ngongonda, Dingase Faith", "Pouly, Marc", "Rakotoarisaona, Mendrika Fifaliana", "Rapelanoro Rabenja, Fahafahantsoa", "Traore\u0301, Ibrahima", "Navarini, Alexander A." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":324 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0297_paper.pdf", "bibtext":"@InProceedings{ Bas_Quest_MICCAI2024,\n author = { Basak, Hritam and Yin, Zhaozheng },\n title = { { Quest for Clone: Test-time Domain Adaptation for Medical Image Segmentation by Searching the Closest Clone in Latent Space } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Unsupervised Domain Adaptation (UDA) aims to align labeled source distribution and unlabeled target distribution by mining domain-agnostic feature representation. However, adapting the source-trained model for new target domains after the model is deployed to users poses a significant challenge. \nTo address this, we propose a generative latent search paradigm to reconstruct the closest clone of every target image from the source latent space. This involves utilizing a test-time adaptation (TTA) strategy, wherein a latent optimization step finds the closest clone of each target image from the source representation space using variational sampling of source latent distribution. Thus, our method facilitates domain adaptation without requiring target-domain supervision during training. Moreover, we demonstrate that our approach can be further fine-tuned using a few labeled target data without the need for unlabeled target data, by leveraging global and local label guidance from available target annotations to enhance the downstream segmentation task. We empirically validate the efficacy of our proposed method, surpassing existing UDA, TTA, and SSDA methods in two domain adaptive image segmentation tasks. 
Code is available at \\href{https:\/\/github.com\/hritam-98\/Quest4Clone}{GitHub}", "title":"Quest for Clone: Test-time Domain Adaptation for Medical Image Segmentation by Searching the Closest Clone in Latent Space", "authors":[ "Basak, Hritam", "Yin, Zhaozheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":325 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1644_paper.pdf", "bibtext":"@InProceedings{ Lia_Enhancing_MICCAI2024,\n author = { Liang, Peixian and Zheng, Hao and Li, Hongming and Gong, Yuxin and Bakas, Spyridon and Fan, Yong },\n title = { { Enhancing Whole Slide Image Classification with Discriminative and Contrastive Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Whole slide image (WSI) classification plays a crucial role in digital pathology data analysis. However, the immense size of WSIs and the absence of fine-grained sub-region labels pose significant challenges for accurate WSI classification. Typical classification-driven deep learning methods often struggle to generate informative image representations, which can compromise the robustness of WSI classification. In this study, we address this challenge by incorporating both discriminative and contrastive learning techniques for WSI classification. Different from the existing contrastive learning methods for WSI classification that primarily rely on pseudo labels assigned to patches based on the WSI-level labels, our approach takes a different route to directly focus on constructing positive and negative samples at the WSI-level. Specifically, we select a subset of representative image patches to represent WSIs and create positive and negative samples at the WSI-level, facilitating effective learning of informative image features. 
Experimental results on two datasets and ablation studies have demonstrated that our method significantly improved the WSI classification performance compared to state-of-the-art deep learning methods and enabled learning of informative features that promoted robustness of the WSI classification.", "title":"Enhancing Whole Slide Image Classification with Discriminative and Contrastive Learning", "authors":[ "Liang, Peixian", "Zheng, Hao", "Li, Hongming", "Gong, Yuxin", "Bakas, Spyridon", "Fan, Yong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":326 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1763_paper.pdf", "bibtext":"@InProceedings{ Zho_Gait_MICCAI2024,\n author = { Zhou, Zirui and Liang, Junhao and Peng, Zizhao and Fan, Chao and An, Fengwei and Yu, Shiqi },\n title = { { Gait Patterns as Biomarkers: A Video-Based Approach for Classifying Scoliosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Scoliosis poses significant diagnostic challenges, particularly in adolescents, where early detection is crucial for effective treatment. Traditional diagnostic and follow-up methods, which rely on physical examinations and radiography, face limitations due to the need for clinical expertise and the risk of radiation exposure, thus restricting their use for widespread early screening. In response, we introduce a novel, video-based, non-invasive method for scoliosis classification using gait analysis, which circumvents these limitations. This study presents Scoliosis1K, the first large-scale dataset tailored for video-based scoliosis classification, encompassing over one thousand adolescents. Leveraging this dataset, we developed ScoNet, an initial model that encountered challenges in dealing with the complexities of real-world data. This led to the creation of ScoNet-MT, an enhanced model incorporating multi-task learning, which exhibits promising diagnostic accuracy for application purposes. Our findings demonstrate that gait can be a non-invasive biomarker for scoliosis, revolutionizing screening practices with deep learning and setting a precedent for non-invasive diagnostic methodologies. 
The dataset and code are publicly available at \\url{https:\/\/zhouzi180.github.io\/Scoliosis1K\/}.", "title":"Gait Patterns as Biomarkers: A Video-Based Approach for Classifying Scoliosis", "authors":[ "Zhou, Zirui", "Liang, Junhao", "Peng, Zizhao", "Fan, Chao", "An, Fengwei", "Yu, Shiqi" ], "id":"Conference", "arxiv_id":"2407.05726", "GitHub":[ "https:\/\/github.com\/shiqiyu\/opengait" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":327 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0578_paper.pdf", "bibtext":"@InProceedings{ Sun_PositionGuided_MICCAI2024,\n author = { Sun, Zhichao and Gu, Yuliang and Liu, Yepeng and Zhang, Zerui and Zhao, Zhou and Xu, Yongchao },\n title = { { Position-Guided Prompt Learning for Anomaly Detection in Chest X-Rays } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Anomaly detection in chest X-rays is a critical task. Most methods mainly model the distribution of normal images, and then regard significant deviation from normal distribution as anomaly. Recently, CLIP-based methods, pre-trained on a large number of medical images, have shown impressive performance on zero\/few-shot downstream tasks. In this paper, we aim to explore the potential of CLIP-based methods for anomaly detection in chest X-rays. Considering the discrepancy between the CLIP pre-training data and the task-specific data, we propose a position-guided prompt learning method. Specifically, inspired by the fact that experts diagnose chest X-rays by carefully examining distinct lung regions, we propose learnable position-guided text and image prompts to adapt the task data to the frozen pre-trained CLIP-based model.\nTo enhance the model\u2019s discriminative capability, we propose a novel structure-preserving anomaly synthesis method within chest x-rays during the training process. Extensive experiments on three datasets demonstrate that our proposed method outperforms some state-of-the-art methods. 
The code of our implementation is available at https:\/\/github.com\/sunzc-sunny\/PPAD.", "title":"Position-Guided Prompt Learning for Anomaly Detection in Chest X-Rays", "authors":[ "Sun, Zhichao", "Gu, Yuliang", "Liu, Yepeng", "Zhang, Zerui", "Zhao, Zhou", "Xu, Yongchao" ], "id":"Conference", "arxiv_id":"2405.11976", "GitHub":[ "https:\/\/github.com\/sunzc-sunny\/PPAD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":328 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0515_paper.pdf", "bibtext":"@InProceedings{ Yan_Region_MICCAI2024,\n author = { Yang, Zhiwen and Chen, Haowei and Qian, Ziniu and Zhou, Yang and Zhang, Hui and Zhao, Dan and Wei, Bingzheng and Xu, Yan },\n title = { { Region Attention Transformer for Medical Image Restoration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Transformer-based methods have demonstrated impressive results in medical image restoration, attributed to the multi-head self-attention (MSA) mechanism in the spatial dimension. However, the majority of existing Transformers conduct attention within fixed and coarsely partitioned regions (\\text{e.g.} the entire image or fixed patches), resulting in interference from irrelevant regions and fragmentation of continuous image content. To overcome these challenges, we introduce a novel Region Attention Transformer (RAT) that utilizes a region-based multi-head self-attention mechanism (R-MSA). The R-MSA dynamically partitions the input image into non-overlapping semantic regions using the robust Segment Anything Model (SAM) and then performs self-attention within these regions. This region partitioning is more flexible and interpretable, ensuring that only pixels from similar semantic regions complement each other, thereby eliminating interference from irrelevant regions. Moreover, we introduce a focal region loss to guide our model to adaptively focus on recovering high-difficulty regions. Extensive experiments demonstrate the effectiveness of RAT in various medical image restoration tasks, including PET image synthesis, CT image denoising, and pathological image super-resolution. 
Code is available at \\href{https:\/\/github.com\/Yaziwel\/Region-Attention-Transformer-for-Medical-Image-Restoration.git}{https:\/\/github.com\/RAT}.", "title":"Region Attention Transformer for Medical Image Restoration", "authors":[ "Yang, Zhiwen", "Chen, Haowei", "Qian, Ziniu", "Zhou, Yang", "Zhang, Hui", "Zhao, Dan", "Wei, Bingzheng", "Xu, Yan" ], "id":"Conference", "arxiv_id":"2407.09268", "GitHub":[ "https:\/\/github.com\/Yaziwel\/Region-Attention-Transformer-for-Medical-Image-Restoration.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":329 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0931_paper.pdf", "bibtext":"@InProceedings{ Li_From_MICCAI2024,\n author = { Li, Wuyang and Liu, Xinyu and Yang, Qiushi and Yuan, Yixuan },\n title = { { From Static to Dynamic Diagnostics: Boosting Medical Image Analysis via Motion-Informed Generative Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the field of intelligent healthcare, the accessibility of medical data is severely constrained by privacy concerns, high costs, and limited patient cases, which significantly hinder the development of diagnostic models for qualified clinical assistance. Though previous efforts have been made to synthesize medical images via generative models, they are limited to static imagery that fails to capture the dynamic motions in clinical practice, such as contractile patterns of organ walls, leading to vulnerable prediction in diagnostics. To tackle this issue, we propose a holistic paradigm, VidMotion, to boost medical image analysis with generative medical videos, representing the first exploration in this field. VidMotion consists of a Motion-guided Unbiased Enhancement (MUE) to augment static images into dynamic videos at the data level and a Motion-aware Collaborative Learning (MCL) module to learn with images and generated videos jointly at the model level. Specifically, MUE first transforms medical images into generative videos enriched with diverse clinical motions, which are guided by image-to-video generative foundation models. Then, to avoid the potential clinical bias caused by the imbalanced generative videos, we design an unbiased sampling strategy informed by the class distribution prior statistically, thereby extracting high-quality video frames. In MCL, we perform joint learning with the image and video representation, including a video-to-image distillation and image-to-image consistency, to fully capture the intrinsic motion semantics for motion-informed diagnosis. We validate our method on extensive semi-supervised learning benchmarks and justify that VidMotion is highly effective and efficient, outperforming state-of-the-art approaches significantly. 
The code will be released to push forward the community.", "title":"From Static to Dynamic Diagnostics: Boosting Medical Image Analysis via Motion-Informed Generative Videos", "authors":[ "Li, Wuyang", "Liu, Xinyu", "Yang, Qiushi", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/VidMotion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":330 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2368_paper.pdf", "bibtext":"@InProceedings{ Sah_FedMRL_MICCAI2024,\n author = { Sahoo, Pranab and Tripathi, Ashutosh and Saha, Sriparna and Mondal, Samrat },\n title = { { FedMRL: Data Heterogeneity Aware Federated Multi-agent Deep Reinforcement Learning for Medical Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Despite recent advancements in federated learning (FL) for medical image diagnosis, addressing data heterogeneity among clients remains a significant challenge for practical implementation. A primary hurdle in FL arises from the non-IID nature of data samples across clients, which typically results in a decline in the performance of the aggregated global model. In this study, we introduce FedMRL, a novel federated multi-agent deep reinforcement learning framework designed to address data heterogeneity. FedMRL incorporates a novel loss function to facilitate fairness among clients, preventing bias in the final global model. Additionally, it employs a multi-agent reinforcement learning (MARL) approach to calculate the proximal term (\u03bc) for the personalized local objective function, ensuring convergence to the global optimum. Furthermore, FedMRL integrates an adaptive weight adjustment method using a Self-organizing map (SOM) on the server side to counteract distribution shifts among clients\u2019 local data distributions. 
We assess our approach using two publicly available real-world medical datasets, and the results demonstrate that FedMRL significantly outperforms state-of-the-art techniques, showing its efficacy in addressing data heterogeneity in federated learning.", "title":"FedMRL: Data Heterogeneity Aware Federated Multi-agent Deep Reinforcement Learning for Medical Imaging", "authors":[ "Sahoo, Pranab", "Tripathi, Ashutosh", "Saha, Sriparna", "Mondal, Samrat" ], "id":"Conference", "arxiv_id":"2407.05800", "GitHub":[ "https:\/\/github.com\/Pranabiitp\/FedMRL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":331 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3759_paper.pdf", "bibtext":"@InProceedings{ Rad_ThyGraph_MICCAI2024,\n author = { Radhachandran, Ashwath and Vittalam, Alekhya and Ivezic, Vedrana and Sant, Vivek and Athreya, Shreeram and Moleta, Chace and Patel, Maitraya and Masamed, Rinat and Arnold, Corey and Speier, William },\n title = { { ThyGraph: A Graph-Based Approach for Thyroid Nodule Diagnosis from Ultrasound Studies } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Improved thyroid nodule risk stratification from ultrasound (US) can mitigate overdiagnosis and unnecessary biopsies. Previous studies often train deep learning models using manually selected single US frames; these approaches deviate from clinical practice where physicians utilize multiple image views for diagnosis. This paper introduces ThyGraph, a novel graph-based approach that improves feature aggregation and correlates anatomically proximate images, by leveraging spatial information to model US image studies as patient-level graphs. Graph convolutional networks are trained on image-based and patch-based graphs generated from 505 US image studies to predict nodule malignancy. Self-attention graph pooling is introduced to produce a node-level interpretability metric that is visualized downstream to identify important inputs. Our best performing model demonstrated an AUROC of 0.866\u00b10.019 and AUPRC of 0.749\u00b10.043 across five-fold cross validation, significantly outperforming two previously published attention-based feature aggregation networks. These previous studies fail to account for spatial dependencies by modeling images within a study as independent, uncorrelated instances. In the proposed graph paradigm, ThyGraph can effectively aggregate information across views of a nodule and take advantage of inter-image dependencies to improve nodule risk stratification, leading to better patient triaging and reducing reliance on biopsies. 
Code is available at https:\/\/github.com\/ashwath-radha\/ThyGraph.", "title":"ThyGraph: A Graph-Based Approach for Thyroid Nodule Diagnosis from Ultrasound Studies", "authors":[ "Radhachandran, Ashwath", "Vittalam, Alekhya", "Ivezic, Vedrana", "Sant, Vivek", "Athreya, Shreeram", "Moleta, Chace", "Patel, Maitraya", "Masamed, Rinat", "Arnold, Corey", "Speier, William" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ashwath-radha\/ThyGraph" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":332 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0781_paper.pdf", "bibtext":"@InProceedings{ Xia_Generalizing_MICCAI2024,\n author = { Xia, Peng and Hu, Ming and Tang, Feilong and Li, Wenxue and Zheng, Wenhao and Ju, Lie and Duan, Peibo and Yao, Huaxiu and Ge, Zongyuan },\n title = { { Generalizing to Unseen Domains in Diabetic Retinopathy with Disentangled Representations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diabetic Retinopathy (DR), induced by diabetes, poses a significant risk of visual impairment. Accurate and effective grading of DR aids in the treatment of this condition. Yet existing models experience notable performance degradation on unseen domains due to domain shifts. Previous methods address this issue by simulating domain style through simple visual transformation and mitigating domain noise via learning robust representations. However, domain shifts encompass more than image styles. They overlook biases caused by implicit factors such as ethnicity, age, and diagnostic criteria. In our work, we propose a novel framework where representations of paired data from different domains are decoupled into semantic features and domain noise. The resulting augmented representation comprises original retinal semantics and domain noise from other domains, aiming to generate enhanced representations aligned with real-world clinical needs, incorporating rich information from diverse domains. Subsequently, to improve the robustness of the decoupled representations, class and domain prototypes are employed to interpolate the disentangled representations, and data-aware weights are designed to focus on rare classes and domains. Finally, we devise a robust pixel-level semantic alignment loss to align retinal semantics decoupled from features, maintaining a balance between intra-class diversity and dense class features. Experimental results on multiple benchmarks demonstrate the effectiveness of our method on unseen domains. 
The code implementations are accessible on https:\/\/github.com\/richard-peng-xia\/DECO.", "title":"Generalizing to Unseen Domains in Diabetic Retinopathy with Disentangled Representations", "authors":[ "Xia, Peng", "Hu, Ming", "Tang, Feilong", "Li, Wenxue", "Zheng, Wenhao", "Ju, Lie", "Duan, Peibo", "Yao, Huaxiu", "Ge, Zongyuan" ], "id":"Conference", "arxiv_id":"2406.06384", "GitHub":[ "https:\/\/github.com\/richard-peng-xia\/DECO" ], "paper_page":"https:\/\/huggingface.co\/papers\/2406.06384", "n_linked_authors":1, "upvotes":1, "num_comments":0, "n_authors":9, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":333 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0588_paper.pdf", "bibtext":"@InProceedings{ Dak_On_MICCAI2024,\n author = { Dakri, Abdelmouttaleb and Arora, Vaibhav and Challier, L\u00e9o and Keller, Marilyn and Black, Michael J. and Pujades, Sergi },\n title = { { On predicting 3D bone locations inside the human body } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Knowing the precise location of the bones inside the human body is key in several medical tasks, such as patient placement inside an imaging device or surgical navigation inside a patient. \nOur goal is to predict the bone locations using only an external 3D body surface observation.\nExisting approaches either validate their predictions on 2D data (X-rays) or with pseudo-ground truth computed from motion capture using biomechanical models.\nThus, methods either suffer from a 3D-2D projection ambiguity or directly lack validation on clinical imaging data. \nIn this work, we start with a dataset of segmented skin and long bones obtained from 3D full body MRI images that we refine into individual bone segmentations.\nTo learn the skin to bones correlations, one needs to register the paired data.\nFew anatomical models allow to register a skeleton and the skin simultaneously.\nOne such method, SKEL, has a skin and skeleton that is jointly rigged with the same pose parameters. 
However, it lacks the flexibility to adjust the bone locations inside its skin.\nTo address this, we extend SKEL into SKEL-J to allow its bones to fit the segmented bones while its skin fits the segmented skin.\nThese precise fits allow us to train SKEL-J to more accurately infer the anatomical joint locations from the skin surface.\nOur qualitative and quantitative results show how our bone location predictions are more accurate than all existing approaches.\nTo foster future research, we make available for research purposes the individual bone segmentations, the fitted SKEL-J models as well as the new inference methods at https:\/\/3dbones.is.tue.mpg.de.", "title":"On predicting 3D bone locations inside the human body", "authors":[ "Dakri, Abdelmouttaleb", "Arora, Vaibhav", "Challier, L\u00e9o", "Keller, Marilyn", "Black, Michael J.", "Pujades, Sergi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":334 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1489_paper.pdf", "bibtext":"@InProceedings{ Qin_DBSAM_MICCAI2024,\n author = { Qin, Chao and Cao, Jiale and Fu, Huazhu and Shahbaz Khan, Fahad and Anwer, Rao Muhammad },\n title = { { DB-SAM: Delving into High Quality Universal Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recently, the Segment Anything Model (SAM) has demonstrated promising segmentation capabilities in a variety of downstream segmentation tasks. However in the context of universal medical image segmentation there exists a notable performance discrepancy when directly applying SAM due to the domain gap between natural and 2D\/3D medical data. In this work, we propose a dual-branch adapted SAM framework, named DB-SAM, that strives to effectively bridge this domain gap. Our dual-branch adapted SAM contains two branches in parallel: a ViT branch and a convolution branch. The ViT branch incorporates a learnable channel attention block after each frozen attention block, which captures domain-specific local features. On the other hand, the convolution branch employs a light-weight convolutional block to extract domain-specific shallow features from the input medical image. To perform cross-branch feature fusion, we design a bilateral cross-attention block and a ViT convolution fusion block, which dynamically combine diverse information of two branches for mask decoder. Extensive experiments on large-scale medical image dataset with various 3D and 2D medical segmentation tasks reveal the merits of our proposed contributions. On 21 3D medical image segmentation tasks, our proposed DB-SAM achieves an absolute gain of 8.8\\%, compared to a recent medical SAM adapter in the literature. 
Our code and models will be publicly released.", "title":"DB-SAM: Delving into High Quality Universal Medical Image Segmentation", "authors":[ "Qin, Chao", "Cao, Jiale", "Fu, Huazhu", "Shahbaz Khan, Fahad", "Anwer, Rao Muhammad" ], "id":"Conference", "arxiv_id":"2410.04172", "GitHub":[ "https:\/\/github.com\/AlfredQin\/DB-SAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":335 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4075_paper.pdf", "bibtext":"@InProceedings{ Che_TransWindow_MICCAI2024,\n author = { Chen, Jiahe and Kobayashi, Etsuko and Sakuma, Ichiro and Tomii, Naoki },\n title = { { Trans-Window Panoramic Impasto for Online Tissue Deformation Recovery } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deformation recovery from laparoscopic images benefits many downstream applications like robot planning, intraoperative navigation and surgical safety assessment. We define tissue deformation as time-variant surface structure and displacement. Besides, we also pay attention to the surface strain, which bridges the visual observation and the tissue biomechanical status, for which continuous pointwise surface mapping and tracking are necessary. Previous SLAM-based methods cannot cope with instrument-induced occlusion and severe scene deformation, while the neural field-based ones are offline and scene-specific, which hinders their application in continuous mapping. Moreover, neither approach meets the requirement of continuous pointwise tracking.\nTo overcome these limitations, we assume a deformable environment and a movable window through which an observer depicts the environment\u2019s 3D structure on a canonical canvas as maps in a process named impasto. The observer performs panoramic impasto for the currently and previously observed 3D structure in a two-step online approach: optimization and fusion. The optimization of the maps compensates for the error in the observation of the structure and the tracking by preserving spatiotemporal smoothness, while the fusion is for merging the estimated and the newly observed maps by ensuring visibility.\nExperiments were conducted using ex vivo and in vivo stereo laparoscopic datasets where tool-tissue interaction occurs and large camera motion exists. Results demonstrate that the proposed online method is robust to instrument-induced occlusion, capable of estimating surface strain, and can continuously reconstruct and track surface points regardless of camera motion. 
Code is available at: https:\/\/github.com\/bmpelab\/trans_window_panoramic_impasto.git", "title":"Trans-Window Panoramic Impasto for Online Tissue Deformation Recovery", "authors":[ "Chen, Jiahe", "Kobayashi, Etsuko", "Sakuma, Ichiro", "Tomii, Naoki" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/bmpelab\/trans_window_panoramic_impasto.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":336 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0843_paper.pdf", "bibtext":"@InProceedings{ Liu_When_MICCAI2024,\n author = { Liu, Yifan and Li, Wuyang and Wang, Cheng and Chen, Hui and Yuan, Yixuan },\n title = { { When 3D Partial Points Meets SAM: Tooth Point Cloud Segmentation with Sparse Labels } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Tooth point cloud segmentation is a fundamental task in many orthodontic applications. Current research mainly focuses on fully supervised learning which demands expensive and tedious manual point-wise annotation. Although recent weakly-supervised alternatives are proposed to use weak labels for 3D segmentation and achieve promising results, they tend to fail when the labels are extremely sparse. \nInspired by the powerful promptable segmentation capability of the Segment Anything Model (SAM), we propose a framework named SAMTooth that leverages such capacity to complement the extremely sparse supervision. To automatically generate appropriate point prompts for SAM, we propose a novel Confidence-aware Prompt Generation strategy, where coarse category predictions are aggregated with confidence-aware filtering. Furthermore, to fully exploit the structural and shape clues in SAM\u2019s outputs for assisting the 3D feature learning, we advance a Mask-guided Representation Learning that re-projects the generated tooth masks of SAM into 3D space and constrains these points of different teeth to possess distinguished representations. To demonstrate the effectiveness of the framework, we conduct experiments on the public dataset and surprisingly find with only 0.1\\% annotations (one point per tooth), our method can surpass recent weakly supervised methods by a large margin, and the performance is even comparable to the recent fully-supervised methods, showcasing the significant potential of applying SAM to 3D perception tasks with sparse labels. 
Code is available at https:\/\/github.com\/CUHK-AIM-Group\/SAMTooth.", "title":"When 3D Partial Points Meets SAM: Tooth Point Cloud Segmentation with Sparse Labels", "authors":[ "Liu, Yifan", "Li, Wuyang", "Wang, Cheng", "Chen, Hui", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"2409.01691", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/SAMTooth" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":337 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0651_paper.pdf", "bibtext":"@InProceedings{ Zhu_Semisupervised_MICCAI2024,\n author = { Zhu, Ruiyun and Oda, Masahiro and Hayashi, Yuichiro and Kitasaka, Takayuki and Mori, Kensaku },\n title = { { Semi-supervised Tubular Structure Segmentation with Cross Geometry and Hausdorff Distance Consistency } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"This study introduces a novel semi-supervised method for 3D segmentation of tubular structures. Complete and automated segmentation of complex tubular structures in medical imaging remains a challenging task. \nTraditional supervised deep learning methods often demand a tremendous number of annotated data to train the deep model, with the high cost and difficulty of obtaining annotations. To address this, a semi-supervised approach could be a viable solution. Segmenting complex tubular structures with limited annotated data remains a formidable challenge. Many semi-supervised techniques rely on pseudo-labeling, which involves generating labels for unlabeled images based on predictions from a model trained on labeled data. Besides, several semi-supervised learning methods are proposed based on data-level consistency, which enforces consistent predictions by applying perturbations to input images. However, these methods tend to overlook the geometric shape characteristics of the segmentation targets. In our research, we introduce a task-level consistency learning approach that incorporates cross geometry consistency and the Hausdorff distance consistency, taking advantage of the geometric shape properties of both labeled and unlabeled data. Our deep learning model generates both a segmentation map and a distance transform map. By applying the proposed consistency, we ensure that the geometric shapes in both maps align closely, thereby enhancing the accuracy and performance of tubular structure segmentation. 
We tested our method on airway segmentation in 3D CT scans, where it outperformed the recent state-of-the-art methods, showing an 88.4% tree length detected rate, 82.8% branch detected rate, and 89.7% precision rate.", "title":"Semi-supervised Tubular Structure Segmentation with Cross Geometry and Hausdorff Distance Consistency", "authors":[ "Zhu, Ruiyun", "Oda, Masahiro", "Hayashi, Yuichiro", "Kitasaka, Takayuki", "Mori, Kensaku" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":338 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1964_paper.pdf", "bibtext":"@InProceedings{ Zhu_LoCIDiffCom_MICCAI2024,\n author = { Zhu, Zihao and Tao, Tianli and Tao, Yitian and Deng, Haowen and Cai, Xinyi and Wu, Gaofeng and Wang, Kaidong and Tang, Haifeng and Zhu, Lixuan and Gu, Zhuoyang and Shen, Dinggang and Zhang, Han },\n title = { { LoCI-DiffCom: Longitudinal Consistency-Informed Diffusion Model for 3D Infant Brain Image Completion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The infant brain undergoes rapid development in the first few years after birth. Compared to cross-sectional studies, longitudinal studies can depict the trajectories of infants\u2019 brain development with higher accuracy, statistical power and flexibility. However, the collection of infant longitudinal magnetic resonance (MR) data suffers a notorious dropout problem, resulting in incomplete datasets with missing time points. This limitation significantly impedes subsequent neuroscience and clinical modeling. Yet, existing deep generative models are facing difficulties in missing brain image completion, due to sparse data and the nonlinear, dramatic contrast\/geometric variations in the developing brain. We propose LoCI-DiffCom, a novel Longitudinal Consistency-Informed Diffusion model for infant brain image Completion, which integrates the images from preceding and subsequent time points to guide a diffusion model for generating high-fidelity missing data. Our designed LoCI module can work on highly sparse sequences, relying solely on data from two temporal points. Despite wide separation and diversity between age time points, our approach can extract individualized developmental features while ensuring context-aware consistency. 
Our experiments on a large infant brain MR dataset demonstrate its effectiveness with consistent performance on missing infant brain MR completion even in big gap scenarios, aiding in better delineation of early developmental trajectories.", "title":"LoCI-DiffCom: Longitudinal Consistency-Informed Diffusion Model for 3D Infant Brain Image Completion", "authors":[ "Zhu, Zihao", "Tao, Tianli", "Tao, Yitian", "Deng, Haowen", "Cai, Xinyi", "Wu, Gaofeng", "Wang, Kaidong", "Tang, Haifeng", "Zhu, Lixuan", "Gu, Zhuoyang", "Shen, Dinggang", "Zhang, Han" ], "id":"Conference", "arxiv_id":"2405.10691", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":339 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1060_paper.pdf", "bibtext":"@InProceedings{ Kal_Unsupervised_MICCAI2024,\n author = { Kalkhof, John and Ranem, Amin and Mukhopadhyay, Anirban },\n title = { { Unsupervised Training of Neural Cellular Automata on Edge Devices } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"The disparity in access to machine learning tools for medical imaging across different regions significantly limits the potential for universal healthcare innovation, particularly in remote areas. Our research addresses this issue by implementing Neural Cellular Automata (NCA) training directly on smartphones for accessible X-ray lung segmentation. We confirm the practicality and feasibility of deploying and training these advanced models on five Android devices, improving medical diagnostics accessibility and bridging the tech divide to extend machine learning benefits in medical imaging to low- and middle-income countries. We further enhance this approach with an unsupervised adaptation method using the novel Variance-Weighted Segmentation Loss (VWSL), which efficiently learns from unlabeled data by minimizing the variance from multiple NCA predictions. This strategy notably improves model adaptability and performance across diverse medical imaging contexts without the need for extensive computational resources or labeled datasets, effectively lowering the participation threshold. Our methodology, tested on three multisite X-ray datasets\u2014Padchest, ChestX-ray8, and MIMIC-III\u2014demonstrates improvements in segmentation Dice accuracy by 0.7 to 2.8%, compared to the classic Med-NCA. 
Additionally, in extreme cases where no digital copy is available and images must be captured by a phone from an X-ray lightbox or monitor, VWSL enhances Dice accuracy by 5-20%, demonstrating the method\u2019s robustness even with suboptimal image sources.", "title":"Unsupervised Training of Neural Cellular Automata on Edge Devices", "authors":[ "Kalkhof, John", "Ranem, Amin", "Mukhopadhyay, Anirban" ], "id":"Conference", "arxiv_id":"2407.18114", "GitHub":[ "https:\/\/github.com\/MECLabTUDA\/M3D-NCA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":340 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0802_paper.pdf", "bibtext":"@InProceedings{ Xie_SimTxtSeg_MICCAI2024,\n author = { Xie, Yuxin and Zhou, Tao and Zhou, Yi and Chen, Geng },\n title = { { SimTxtSeg: Weakly-Supervised Medical Image Segmentation with Simple Text Cues } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Weakly-supervised medical image segmentation is a challenging task that aims to reduce the annotation cost while keep the segmentation performance. In this paper, we present a novel framework, SimTxtSeg, that leverages simple text cues to generate high-quality pseudo-labels and study the cross-modal fusion in training segmentation models, simultaneously. Our contribution consists of two key components: an effective Textual-to-Visual Cue Converter that produces visual prompts from text prompts on medical images, and a text-guided segmentation model with Text-Vision Hybrid Attention that fuses text and image features. We evaluate our framework on two medical image segmentation tasks: colonic polyp segmentation and MRI brain tumor segmentation, and achieve consistent state-of-the-art performance. Source code is available at: https:\/\/github.com\/xyx1024\/SimTxtSeg.", "title":"SimTxtSeg: Weakly-Supervised Medical Image Segmentation with Simple Text Cues", "authors":[ "Xie, Yuxin", "Zhou, Tao", "Zhou, Yi", "Chen, Geng" ], "id":"Conference", "arxiv_id":"2406.19364", "GitHub":[ "https:\/\/github.com\/xyx1024\/SimTxtSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":341 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2751_paper.pdf", "bibtext":"@InProceedings{ Wan_Progressively_MICCAI2024,\n author = { Wang, Yaqi and Cao, Peng and Hou, Qingshan and Lan, Linqi and Yang, Jinzhu and Liu, Xiaoli and Zaiane, Osmar R. 
},\n title = { { Progressively Correcting Soft Labels via Teacher Team for Knowledge Distillation in Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"State-of-the-art knowledge distillation (KD) methods aim to capture the underlying information within the teacher and explore effective strategies for knowledge transfer.\nHowever, due to challenges such as blurriness, noise, and low contrast inherent in medical images, the teacher\u2019s predictions (soft labels) may also include false information, thus potentially misguiding the student\u2019s learning process.\nAddressing this, we pioneer a novel correction-based KD approach (PLC-KD) and introduce two assistants for perceiving and correcting the false soft labels.\nMore specifically, the false-pixel-aware assistant targets global error correction, while the boundary-aware assistant focuses on lesion boundary errors.\nAdditionally, a similarity-based correction scheme is designed to forcefully rectify the remaining hard false pixels.\nThrough this collaborative effort, teacher team (comprising a teacher and two assistants) progressively generates more accurate soft labels, ensuring the \u201call-correct\u201d final soft labels for student guidance during KD. \nExtensive experimental results demonstrate that the proposed PLC-KD framework attains superior performance to state-of-the-art methods on three challenging medical segmentation tasks.", "title":"Progressively Correcting Soft Labels via Teacher Team for Knowledge Distillation in Medical Image Segmentation", "authors":[ "Wang, Yaqi", "Cao, Peng", "Hou, Qingshan", "Lan, Linqi", "Yang, Jinzhu", "Liu, Xiaoli", "Zaiane, Osmar R." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":342 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2811_paper.pdf", "bibtext":"@InProceedings{ Jeo_Uncertaintyaware_MICCAI2024,\n author = { Jeong, Minjae and Cho, Hyuna and Jung, Sungyoon and Kim, Won Hwa },\n title = { { Uncertainty-aware Diffusion-based Adversarial Attack for Realistic Colonoscopy Image Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated semantic segmentation in colonoscopy is crucial for detecting colon polyps and preventing the development of colorectal cancer. However, the scarcity of annotated data presents a challenge to the segmentation task. Recent studies address this data scarcity issue with data augmentation techniques such as perturbing data with adversarial noises or using a generative model to sample unseen images from a learned data distribution. The perturbation approach controls the level of data ambiguity to expand discriminative regions but the augmented noisy images exhibit a lack of diversity. On the other hand, generative models yield diverse realistic images but they cannot directly control the data ambiguity. 
Therefore, we propose Diffusion-based Adversarial attack for Semantic segmentation considering Pixel-level uncertainty (DASP), which incorporates both the controllability of ambiguity in adversarial attack and the data diversity of generative models. Using a hierarchical mask-to-image generation scheme, our method generates both expansive labels and their corresponding images that exhibit diversity and realism. Also, our method controls the magnitude of adversarial attack per pixel considering its uncertainty such that a network prioritizes learning on challenging pixels. The effectiveness of our method is extensively validated on two public polyp segmentation benchmarks with four backbone networks, demonstrating its superiority over eleven baselines.", "title":"Uncertainty-aware Diffusion-based Adversarial Attack for Realistic Colonoscopy Image Synthesis", "authors":[ "Jeong, Minjae", "Cho, Hyuna", "Jung, Sungyoon", "Kim, Won Hwa" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":343 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0236_paper.pdf", "bibtext":"@InProceedings{ Sei_Spatial_MICCAI2024,\n author = { Seibold, Matthias and Bahari Malayeri, Ali and F\u00fcrnstahl, Philipp },\n title = { { Spatial Context Awareness in Surgery through Sound Source Localization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Context awareness and scene understanding are integral components for the development of intelligent systems in computer-aided and robotic surgery. While most systems primarily utilize visual data for scene understanding, recent proof-of-concepts have showcased the potential of acoustic signals for the detection and analysis of surgical activity that is associated with typical noise emissions. However, acoustic approaches have not yet been effectively employed for localization tasks in surgery, which are crucial to obtain a comprehensive understanding of a scene.
In this work, we introduce the novel concept of Sound Source Localization (SSL) for surgery which can reveal acoustic activity and its location in the surgical field, therefore providing insight into the interactions of surgical staff with the patient and medical equipment.", "title":"Spatial Context Awareness in Surgery through Sound Source Localization", "authors":[ "Seibold, Matthias", "Bahari Malayeri, Ali", "F\u00fcrnstahl, Philipp" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":344 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3008_paper.pdf", "bibtext":"@InProceedings{ Hu_Perspective_MICCAI2024,\n author = { Hu, Jintong and Chen, Siyan and Pan, Zhiyi and Zeng, Sen and Yang, Wenming },\n title = { { Perspective+ Unet: Enhancing Segmentation with Bi-Path Fusion and Efficient Non-Local Attention for Superior Receptive Fields } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Precise segmentation of medical images is fundamental for extracting critical clinical information, which plays a pivotal role in enhancing the accuracy of diagnoses, formulating effective treatment plans, and improving patient outcomes. Although Convolutional Neural Networks (CNNs) and non-local attention methods have achieved notable success in medical image segmentation, they either struggle to capture long-range spatial dependencies due to their reliance on local features, or face significant computational and feature integration challenges when attempting to address this issue with global attention mechanisms. To overcome existing limitations in medical image segmentation, we propose a novel architecture, Perspective+ Unet. This framework is characterized by three major innovations: (i) It introduces a dual-pathway strategy at the encoder stage that combines the outcomes of traditional and dilated convolutions. This not only maintains the local receptive field but also significantly expands it, enabling better comprehension of the global structure of images while retaining detail sensitivity. (ii) The framework incorporates an efficient non-local transformer block, named ENLTB, which utilizes kernel function approximation for effective long-range dependency capture with linear computational and spatial complexity. (iii) A Spatial Cross-Scale Integrator strategy is employed to merge global dependencies and local contextual cues across model stages, meticulously refining features from various levels to harmonize global and local information. Experimental results on the ACDC and Synapse datasets demonstrate the effectiveness of our proposed Perspective+ Unet. 
The code is available in the supplementary material.", "title":"Perspective+ Unet: Enhancing Segmentation with Bi-Path Fusion and Efficient Non-Local Attention for Superior Receptive Fields", "authors":[ "Hu, Jintong", "Chen, Siyan", "Pan, Zhiyi", "Zeng, Sen", "Yang, Wenming" ], "id":"Conference", "arxiv_id":"2406.14052", "GitHub":[ "https:\/\/github.com\/tljxyys\/Perspective-Unet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":345 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2469_paper.pdf", "bibtext":"@InProceedings{ Pod_HDilemma_MICCAI2024,\n author = { Podobnik, Gas\u030cper and Vrtovec, Tomaz\u030c },\n title = { { HDilemma: Are Open-Source Hausdorff Distance Implementations Equivalent? } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Quantitative performance metrics play a pivotal role in medical imaging by offering critical insights into method performance and facilitating objective method comparison. Recently, platforms providing recommendations for metrics selection as well as resources for evaluating methods through computational challenges and online benchmarking have emerged, with an inherent assumption that metrics implementations are consistent across studies and equivalent throughout the community. In this study, we question this assumption by reviewing five different open-source implementations for computing the Hausdorff distance (HD), a boundary-based metric commonly used for assessing the performance of semantic segmentation. Despite sharing a single generally accepted mathematical definition, our experiments reveal notable systematic differences in the HD and its 95th percentile variant across implementations when applied to clinical segmentations with varying voxel sizes, which fundamentally impacts and constrains the ability to objectively compare results across different studies. Our findings should encourage the medical imaging community towards standardizing the implementation of the HD computation, so as to foster objective, reproducible and consistent comparisons when reporting performance results.", "title":"HDilemma: Are Open-Source Hausdorff Distance Implementations Equivalent?", "authors":[ "Podobnik, Gas\u030cper", "Vrtovec, Tomaz\u030c" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":346 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2447_paper.pdf", "bibtext":"@InProceedings{ Bui_FALFormer_MICCAI2024,\n author = { Bui, Doanh C. 
and Vuong, Trinh Thi Le and Kwak, Jin Tae },\n title = { { FALFormer: Feature-aware Landmarks self-attention for Whole-slide Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Slide-level classification for whole-slide images (WSIs) has been widely recognized as a crucial problem in digital and computational pathology. Current approaches commonly consider WSIs as a bag of cropped patches and process them via multiple instance learning due to the large number of patches, which cannot fully explore the relationship among patches; in other words, the global information cannot be fully incorporated into decision making. Herein, we propose an efficient and effective slide-level classification model, named as FALFormer, that can process a WSI as a whole so as to fully exploit the relationship among the entire patches and to improve the classification performance. FALFormer is built based upon Transformers and self-attention mechanism. To lessen the computational burden of the original self-attention mechanism and to process the entire patches together in a WSI, FALFormer employs Nystr\u00f6m self-attention which approximates the computation by using a smaller number of tokens or landmarks. For effective learning, FALFormer introduces feature-aware landmarks to enhance the representation power of the landmarks and the quality of the approximation. We systematically evaluate the performance of FALFormer using two public datasets, including CAMELYON16 and TCGA-BRCA. The experimental results demonstrate that FALFormer achieves superior performance on both datasets, outperforming the state-of-the-art methods for the slide-level classification. This suggests that FALFormer can facilitate an accurate and precise analysis of WSIs, potentially leading to improved diagnosis and prognosis on WSIs.", "title":"FALFormer: Feature-aware Landmarks self-attention for Whole-slide Image Classification", "authors":[ "Bui, Doanh C.", "Vuong, Trinh Thi Le", "Kwak, Jin Tae" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/quiil\/falformer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":347 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1336_paper.pdf", "bibtext":"@InProceedings{ Lin_Beyond_MICCAI2024,\n author = { Lin, Xian and Xiang, Yangyang and Yu, Li and Yan, Zengqiang },\n title = { { Beyond Adapting SAM: Towards End-to-End Ultrasound Image Segmentation via Auto Prompting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"End-to-end medical image segmentation is of great value for computer-aided diagnosis dominated by task-specific models, usually suffering from poor generalization. 
With recent breakthroughs brought by the segment anything model (SAM) for universal image segmentation, extensive efforts have been made to adapt SAM for medical imaging but still encounter two major issues: 1) severe performance degradation and limited generalization without proper adaptation, and 2) semi-automatic segmentation relying on accurate manual prompts for interaction. In this work, we propose SAMUS as a universal model tailored for ultrasound image segmentation and further enable it to work in an end-to-end manner denoted as AutoSAMUS. Specifically, in SAMUS, a parallel CNN branch is introduced to supplement local information through cross-branch attention, and a feature adapter and a position adapter are jointly used to adapt SAM from natural to ultrasound domains while reducing training complexity. AutoSAMUS is realized by introducing an auto prompt generator (APG) to replace the manual prompt encoder of SAMUS to automatically generate prompt embeddings. A comprehensive ultrasound dataset, comprising about 30k images and 69k masks and covering six object categories, is collected for verification. Extensive comparison experiments demonstrate the superiority of SAMUS and AutoSAMUS against the state-of-the-art task-specific and SAM-based foundation models. We believe the auto-prompted SAM-based model has the potential to become a new paradigm for end-to-end medical image segmentation and deserves more exploration. Code and data are available at https:\/\/github.com\/xianlin7\/SAMUS.", "title":"Beyond Adapting SAM: Towards End-to-End Ultrasound Image Segmentation via Auto Prompting", "authors":[ "Lin, Xian", "Xiang, Yangyang", "Yu, Li", "Yan, Zengqiang" ], "id":"Conference", "arxiv_id":"2309.06824", "GitHub":[ "https:\/\/github.com\/xianlin7\/SAMUS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":348 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1807_paper.pdf", "bibtext":"@InProceedings{ Na_RadiomicsFillMammo_MICCAI2024,\n author = { Na, Inye and Kim, Jonghun and Ko, Eun Sook and Park, Hyunjin },\n title = { { RadiomicsFill-Mammo: Synthetic Mammogram Mass Manipulation with Radiomics Features } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Motivated by the question, \u201cCan we generate tumors with desired attributes?\u2019\u2019 this study leverages radiomics features to explore the feasibility of generating synthetic tumor images. Characterized by its low-dimensional yet biologically meaningful markers, radiomics bridges the gap between complex medical imaging data and actionable clinical insights. We present RadiomicsFill-Mammo, the first of the RadiomicsFill series, an innovative technique that generates realistic mammogram mass images mirroring specific radiomics attributes using masked images and opposite breast images, leveraging a recent stable diffusion model. This approach also allows for the incorporation of essential clinical variables, such as BI-RADS and breast density, alongside radiomics features as conditions for mass generation. 
Results indicate that RadiomicsFill-Mammo effectively generates diverse and realistic tumor images based on various radiomics conditions. Results also demonstrate a significant improvement in mass detection capabilities, leveraging RadiomicsFill-Mammo as a strategy to generate simulated samples. Furthermore, RadiomicsFill-Mammo not only advances medical imaging research but also opens new avenues for enhancing treatment planning and tumor simulation. Our code is available at https:\/\/github.com\/nainye\/RadiomicsFill.", "title":"RadiomicsFill-Mammo: Synthetic Mammogram Mass Manipulation with Radiomics Features", "authors":[ "Na, Inye", "Kim, Jonghun", "Ko, Eun Sook", "Park, Hyunjin" ], "id":"Conference", "arxiv_id":"2407.05683", "GitHub":[ "https:\/\/github.com\/nainye\/RadiomicsFill" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":349 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2872_paper.pdf", "bibtext":"@InProceedings{ Kan_MedSynth_MICCAI2024,\n author = { Kanagavelu, Renuga and Walia, Madhav and Wang, Yuan and Fu, Huazhu and Wei, Qingsong and Liu, Yong and Goh, Rick Siow Mong },\n title = { { MedSynth: Leveraging Generative Model for Healthcare Data Sharing } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Sharing medical datasets among healthcare organizations is\nessential for advancing AI-assisted disease diagnostics and enhancing patient care. Employing techniques like data de-identification and data synthesis in medical data sharing, however, comes with inherent drawbacks that may lead to privacy leakage. Therefore, there is a pressing need\nfor mechanisms that can effectively conceal sensitive information, ensuring a secure environment for data sharing. Dataset Condensation (DC) emerges as a solution, creating a reduced-scale synthetic dataset from a larger original dataset while maintaining comparable training outcomes. This approach offers advantages in terms of privacy and communication\nefficiency in the context of medical data sharing. Despite these benefits, traditional condensation methods encounter challenges, particularly with high-resolution medical datasets. To address these challenges, we present MedSynth, a novel dataset condensation scheme designed to efficiently condense the knowledge within extensive medical datasets into\na generative model. This facilitates the sharing of the generative model across hospitals without the need to disclose raw data. By combining an attention-based generator with a vision transformer (ViT), MedSynth creates a generative model capable of producing a concise set of representative synthetic medical images, encapsulating the features of the\noriginal dataset. This generative model can then be shared with hospitals to optimize various downstream model training tasks. Extensive experimental results across medical datasets demonstrate that MedSynth outperforms state-of-the-art methods. 
Moreover, MedSynth successfully\ndefends against state-of-the-art Membership Inference Attacks (MIA), highlighting its significant potential in preserving the privacy of medical data.", "title":"MedSynth: Leveraging Generative Model for Healthcare Data Sharing", "authors":[ "Kanagavelu, Renuga", "Walia, Madhav", "Wang, Yuan", "Fu, Huazhu", "Wei, Qingsong", "Liu, Yong", "Goh, Rick Siow Mong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":350 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1207_paper.pdf", "bibtext":"@InProceedings{ Din_HiA_MICCAI2024,\n author = { Ding, Xinpeng and Chu, Yongqiang and Pi, Renjie and Wang, Hualiang and Li, Xiaomeng },\n title = { { HiA: Towards Chinese Multimodal LLMs for Comparative High-Resolution Joint Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multimodal large language models (MLLMs) have been explored in the Chinese medical domain for comprehending complex healthcare. However, due to the flaws in training data and architecture design, current Chinese medical MLLMs suffer from several limitations: cultural biases from English machine translations, limited comparative ability from single image input and difficulty in identifying small lesions with low-resolution images. To address these problems, we first introduce a new instruction-following dataset, Chili-Joint (Chinese Interleaved Image-Text Dataset for Joint Diagnosis) collected from the hospital in mainland China, avoiding cultural biases and errors caused by machine translation. Besides one single image input, Chili-Joint also has multiple images obtained at various intervals during a patient\u2019s treatment, thus facilitating an evaluation of the treatment\u2019s outcomes. We further propose a novel HiA (High-resolution instruction-aware Adapter) to incorporate high-resolutioninstruction-aware visual features into LLMs to facilitate the current MLLMs to observe the small lesions as well as the comparative analysis. Extensive experiments on Chili-Joint demonstrate our HiA can be a plug-and-play method to improve the performance of current MLLMs for medical analysis. The code is available at https:\/\/github.com\/xmed-lab\/HiA.", "title":"HiA: Towards Chinese Multimodal LLMs for Comparative High-Resolution Joint Diagnosis", "authors":[ "Ding, Xinpeng", "Chu, Yongqiang", "Pi, Renjie", "Wang, Hualiang", "Li, Xiaomeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xmed-lab\/HiA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":351 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1494_paper.pdf", "bibtext":"@InProceedings{ Wit_SimulationBased_MICCAI2024,\n author = { Wittmann, Bastian and Glandorf, Lukas and Paetzold, Johannes C. 
and Amiranashvili, Tamaz and W\u00e4lchli, Thomas and Razansky, Daniel and Menze, Bjoern },\n title = { { Simulation-Based Segmentation of Blood Vessels in Cerebral 3D OCTA Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Segmentation of blood vessels in murine cerebral 3D OCTA images is foundational for in vivo quantitative analysis of the effects of neurovascular disorders, such as stroke or Alzheimer\u2019s, on the vascular network. However, to accurately segment blood vessels with state-of-the-art deep learning methods, a vast amount of voxel-level annotations is required. Since cerebral 3D OCTA images are typically plagued by artifacts and generally have a low signal-to-noise ratio, acquiring manual annotations poses an especially cumbersome and time-consuming task. To alleviate the need for manual annotations, we propose utilizing synthetic data to supervise segmentation algorithms. To this end, we extract patches from vessel graphs and transform them into synthetic cerebral 3D OCTA images paired with their matching ground truth labels by simulating the most dominant 3D OCTA artifacts. In extensive experiments, we demonstrate that our approach achieves competitive results, enabling annotation-free blood vessel segmentation in cerebral 3D OCTA images.", "title":"Simulation-Based Segmentation of Blood Vessels in Cerebral 3D OCTA Images", "authors":[ "Wittmann, Bastian", "Glandorf, Lukas", "Paetzold, Johannes C.", "Amiranashvili, Tamaz", "W\u00e4lchli, Thomas", "Razansky, Daniel", "Menze, Bjoern" ], "id":"Conference", "arxiv_id":"2403.07116", "GitHub":[ "https:\/\/github.com\/bwittmann\/syn-cerebral-octa-seg" ], "paper_page":"https:\/\/huggingface.co\/papers\/2403.07116", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ "bwittmann\/syn-cerebral-octa-seg" ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ "bwittmann\/syn-cerebral-octa-seg" ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":352 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3485_paper.pdf", "bibtext":"@InProceedings{ Das_Confidenceguided_MICCAI2024,\n author = { Das, Abhijit and Gorade, Vandan and Kumar, Komal and Chakraborty, Snehashis and Mahapatra, Dwarikanath and Roy, Sudipta },\n title = { { Confidence-guided Semi-supervised Learning for Generalized Lesion Localization in X-ray Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"In recent years, pseudo-label (PL) based semi-supervised (SS) methods have been proposed for disease localization in medical images for tasks with limited labeled data. However, these models are not curated for chest x-rays containing anomalies of different shapes and sizes. As a result, existing methods suffer from biased attentiveness towards the minor class and PL inconsistency. Soft-labeling-based methods filter out PLs with higher uncertainty but lead to loss of fine-grained features of minor articulates, resulting in sparse prediction.
To address these challenges, we propose AnoMed, an uncertainty-aware SS framework with a novel scale-invariant bottleneck (SIB) and a confidence-guided pseudo-label optimizer (PLO). SIB leverages the base feature (Fb) obtained from any encoder to capture multi-granular anatomical structures and underlying representations. On top of that, PLO refines hesitant PLs and guides them separately for unsupervised loss, reducing inconsistency. Our extensive experiments on cardiac datasets and out-of-distribution (OOD) fine-tuning demonstrate that AnoMed outperforms other state-of-the-art (SOTA) methods like Efficient Teacher and Mean Teacher with improvements of 4.9 and 5.9 in AP50:95 on VinDr-CXR data. Code for our architecture is available at https:\/\/github.com\/aj-das-research\/AnoMed.", "title":"Confidence-guided Semi-supervised Learning for Generalized Lesion Localization in X-ray Images", "authors":[ "Das, Abhijit", "Gorade, Vandan", "Kumar, Komal", "Chakraborty, Snehashis", "Mahapatra, Dwarikanath", "Roy, Sudipta" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/aj-das-research\/AnoMed" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":353 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1015_paper.pdf", "bibtext":"@InProceedings{ Fu_3DGRCAR_MICCAI2024,\n author = { Fu, Xueming and Li, Yingtai and Tang, Fenghe and Li, Jun and Zhao, Mingyue and Teng, Gao-Jun and Zhou, S. Kevin },\n title = { { 3DGR-CAR: Coronary artery reconstruction from ultra-sparse 2D X-ray views with a 3D Gaussians representation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Reconstructing 3D coronary arteries is important for coronary artery disease diagnosis, treatment planning and operation navigation. Traditional techniques often require many projections, while reconstruction from sparse-view X-ray projections is a potential way of reducing radiation dose. However, the extreme sparsity of coronary artery volume and ultra-limited number of projections pose significant challenges for efficient and accurate 3D reconstruction. We propose 3DGR-CAR, a 3D Gaussian Representation for Coronary Artery Reconstruction from ultra-sparse X-ray projections. We leverage 3D Gaussian representation to avoid the inefficiency caused by the extreme sparsity of coronary artery data, and propose a Gaussian center predictor to overcome the noisy Gaussian initialization from ultra-sparse view projections. The proposed scheme enables fast and accurate 3D coronary arteries reconstruction with only 2 views. Experimental results on two datasets indicate that the proposed approach significantly outperforms other methods in terms of voxel accuracy and visual quality of coronary artery.", "title":"3DGR-CAR: Coronary artery reconstruction from ultra-sparse 2D X-ray views with a 3D Gaussians representation", "authors":[ "Fu, Xueming", "Li, Yingtai", "Tang, Fenghe", "Li, Jun", "Zhao, Mingyue", "Teng, Gao-Jun", "Zhou, S.
Kevin" ], "id":"Conference", "arxiv_id":"2410.00404", "GitHub":[ "https:\/\/github.com\/windrise\/3DGR-CAR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":354 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3387_paper.pdf", "bibtext":"@InProceedings{ Hol_Glioblastoma_MICCAI2024,\n author = { Holden Helland, Ragnhild and Bouget, David and Eijgelaar, Roelant S. and De Witt Hamer, Philip C. and Barkhof, Frederik and Solheim, Ole and Reinertsen, Ingerid },\n title = { { Glioblastoma segmentation from early post-operative MRI: challenges and clinical impact } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Post-surgical evaluation and quantification of residual tumor tissue from magnetic resonance images (MRI) is a crucial step for treatment planning and follow-up in glioblastoma care. Segmentation of enhancing residual tumor tissue from early post-operative MRI is particularly challenging due to small and fragmented lesions, post-operative bleeding, and noise in the resection cavity. Although a lot of progress has been made on the adjacent task of pre-operative glioblastoma segmentation, more targeted methods are needed for addressing the specific challenges and detecting small lesions. In this study, a state-of-the-art architecture for pre-operative segmentation was used, trained on a large in-house multi-center dataset for early post-operative segmentation. Various pre-processing, data sampling techniques, and architecture variants were explored for improving the detection of small lesions. The models were evaluated on a dataset annotated by 8 novice and expert human raters, and the performance compared against the human inter-rater variability. Trained models\u2019 performance were shown to be on par with the performance of human expert raters. 
As such, automatic segmentation models have the potential to be a valuable tool in a clinical setting as an accurate and time-saving alternative, compared to the current standard manual method for residual tumor measurement after surgery.", "title":"Glioblastoma segmentation from early post-operative MRI: challenges and clinical impact", "authors":[ "Holden Helland, Ragnhild", "Bouget, David", "Eijgelaar, Roelant S.", "De Witt Hamer, Philip C.", "Barkhof, Frederik", "Solheim, Ole", "Reinertsen, Ingerid" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/dbouget\/validation_metrics_computation" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":355 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0158_paper.pdf", "bibtext":"@InProceedings{ Rey_EchoNetSynthetic_MICCAI2024,\n author = { Reynaud, Hadrien and Meng, Qingjie and Dombrowski, Mischa and Ghosh, Arijit and Day, Thomas and Gomez, Alberto and Leeson, Paul and Kainz, Bernhard },\n title = { { EchoNet-Synthetic: Privacy-preserving Video Generation for Safe Medical Data Sharing } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"To make medical datasets accessible without sharing sensitive patient information, we introduce a novel end-to-end approach for \ngenerative de-identification of dynamic medical imaging data. \nUntil now, generative methods have faced constraints in terms of fidelity, spatio-temporal coherence, and the length of generation, failing to capture the complete details of dataset distributions.\nWe present a model designed to produce high-fidelity, long and complete data samples with near-real-time efficiency and explore our approach on a challenging task: generating echocardiogram videos. \nWe develop our generation method based on diffusion models and introduce a protocol for medical video dataset anonymization. \nAs an exemplar, we present EchoNet-Synthetic, a fully synthetic, privacy-compliant echocardiogram dataset with paired ejection fraction labels. 
\nAs part of our de-identification protocol, we evaluate the quality of the generated dataset and propose to use clinical downstream tasks as a measurement on top of widely used but potentially biased image quality metrics.\nExperimental outcomes demonstrate that EchoNet-Synthetic achieves comparable dataset fidelity to the actual dataset, effectively supporting the ejection fraction regression task.\nCode, weights and dataset are available at https:\/\/github.com\/HReynaud\/EchoNet-Synthetic.", "title":"EchoNet-Synthetic: Privacy-preserving Video Generation for Safe Medical Data Sharing", "authors":[ "Reynaud, Hadrien", "Meng, Qingjie", "Dombrowski, Mischa", "Ghosh, Arijit", "Day, Thomas", "Gomez, Alberto", "Leeson, Paul", "Kainz, Bernhard" ], "id":"Conference", "arxiv_id":"2406.00808", "GitHub":[ "https:\/\/github.com\/HReynaud\/EchoNet-Synthetic" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":356 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3653_paper.pdf", "bibtext":"@InProceedings{ Hu_ConsecutiveContrastive_MICCAI2024,\n author = { Hu, Dan and Han, Kangfu and Cheng, Jiale and Li, Gang },\n title = { { Consecutive-Contrastive Spherical U-net: Enhancing Reliability of Individualized Functional Brain Parcellation for Short-duration fMRI Scans } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Individualized brain parcellations derived from functional MRI (fMRI) are essential for discerning unique functional patterns of individuals, facilitating personalized diagnoses and treatments. Unfortunately, as fMRI signals are inherently noisy, establishing reliable individualized parcellations typically necessitates a long-duration fMRI scan (> 25 min), posing a major challenge and resulting in the exclusion of numerous short-duration fMRI scans from individualized studies. To address this issue, we develop a novel Consecutive-Contrastive Spherical U-net (CC-SUnet) to enable the prediction of reliable individualized brain parcellation using short-duration fMRI data, greatly expanding its practical applicability. Specifically, 1) the widely used functional diffusion map (DM), obtained from functional connectivity, is carefully selected as the predictive feature, for its advantage in tracing the transitions between regions while reducing noise. To ensure a robust depiction of brain network, we propose a dual-task model to predict DM and cortical parcellation simultaneously, fully utilizing their reciprocal relationship. 2) By constructing a stepwise dataset to capture the gradual changes of DM over increasing scan durations, a consecutive prediction framework is designed to realize the prediction from short-to-long gradually. 3) A stepwise-denoising-prediction module is further proposed. The noise representations are separated and replaced by the latent representations of a group-level diffusion map, realizing informative guidance and denoising concurrently. 4) Additionally, an N-pair contrastive loss is introduced to strengthen the discriminability of the individualized parcellations.
Extensive experimental results demonstrated the superiority of our proposed CC-SUnet in enhancing the reliability of the individualized parcellations with short-duration fMRI data, thereby significantly boosting their utility in individualized studies.", "title":"Consecutive-Contrastive Spherical U-net: Enhancing Reliability of Individualized Functional Brain Parcellation for Short-duration fMRI Scans", "authors":[ "Hu, Dan", "Han, Kangfu", "Cheng, Jiale", "Li, Gang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":357 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1619_paper.pdf", "bibtext":"@InProceedings{ Xu_Multiscale_MICCAI2024,\n author = { Xu, Yanyu and Xia, Yingzhi and Fu, Huazhu and Goh, Rick Siow Mong and Liu, Yong and Xu, Xinxing },\n title = { { Multi-scale Region-aware Implicit Neural Network for Medical Images Matting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image segmentation is a critical task in computer-assisted diagnosis and disease monitoring, where labeling complex and ambiguous targets poses a significant challenge. Recently, the alpha matte has been investigated as a soft mask in medical scenes, using continuous values to quantify and distinguish uncertain lesions with high diagnostic values. In this work, we propose a multi-scale region-aware implicit function network for the medical matting problem. Firstly, we design a region-aware implicit neural function to interpolate over larger and more flexible regions, preserving important input details. Further, the method employs multi-scale feature fusion to efficiently and precisely aggregate features from different levels.
Experimental results on public medical matting datasets demonstrate the effectiveness of our proposed approach, and we release the codes and models in https:\/\/github.com\/xuyanyu-shh\/MedicalMattingMLP.", "title":"Multi-scale Region-aware Implicit Neural Network for Medical Images Matting", "authors":[ "Xu, Yanyu", "Xia, Yingzhi", "Fu, Huazhu", "Goh, Rick Siow Mong", "Liu, Yong", "Xu, Xinxing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xuyanyu-shh\/MedicalMattingMLP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":358 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1328_paper.pdf", "bibtext":"@InProceedings{ Lai_EchoMEN_MICCAI2024,\n author = { Lai, Song and Zhao, Mingyang and Zhao, Zhe and Chang, Shi and Yuan, Xiaohua and Liu, Hongbin and Zhang, Qingfu and Meng, Gaofeng },\n title = { { EchoMEN: Combating Data Imbalance in Ejection Fraction Regression via Multi-Expert Network } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Ejection Fraction (EF) regression faces a critical challenge due to severe data imbalance since samples in the normal EF range significantly outnumber those in the abnormal range. This imbalance results in a bias in existing EF regression methods towards the normal population, undermining health equity. Furthermore, current imbalanced regression methods struggle with the head-tail performance trade-off, leading to increased prediction errors for the normal population. In this paper, we introduce EchoMEN, a multi-expert model designed to improve EF regression with balanced performance. EchoMEN adopts a two-stage decoupled training strategy. The first stage proposes a Label-Distance Weighted Supervised Contrastive Loss to enhance representation learning. This loss considers the label relationship among negative sample pairs, which encourages samples further apart in label space to be further apart in feature space. The second stage trains multiple regression experts independently with variably re-weighted settings, focusing on different parts of the target region. Their predictions are then combined using a weighted method to learn an unbiased ensemble regressor. Extensive experiments on the EchoNet-Dynamic dataset demonstrate that EchoMEN outperforms state-of-the-art algorithms and achieves well-balanced performance throughout all heart failure categories. 
Code: https:\/\/github.com\/laisong-22004009\/EchoMEN.", "title":"EchoMEN: Combating Data Imbalance in Ejection Fraction Regression via Multi-Expert Network", "authors":[ "Lai, Song", "Zhao, Mingyang", "Zhao, Zhe", "Chang, Shi", "Yuan, Xiaohua", "Liu, Hongbin", "Zhang, Qingfu", "Meng, Gaofeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/laisong-22004009\/EchoMEN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":359 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2991_paper.pdf", "bibtext":"@InProceedings{ Yan_SCMIL_MICCAI2024,\n author = { Yang, Zekang and Liu, Hong and Wang, Xiangdong },\n title = { { SCMIL: Sparse Context-aware Multiple Instance Learning for Predicting Cancer Survival Probability Distribution in Whole Slide Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cancer survival prediction is a challenging task that involves analyzing the tumor microenvironment within a Whole Slide Image (WSI). Previous methods cannot effectively capture the intricate interaction features among instances within the local area of WSI. Moreover, existing methods for cancer survival prediction based on WSI often fail to provide clinically meaningful predictions. To overcome these challenges, we propose a Sparse Context-aware Multiple Instance Learning (SCMIL) framework for predicting cancer survival probability distributions. SCMIL innovatively segments patches into various clusters based on their morphological features and spatial location information, subsequently leveraging sparse self-attention to discern the relationships between these patches with a context-aware perspective. Considering many patches are irrelevant to the task, we introduce a learnable patch filtering module called SoftFilter, which ensures that only interactions between task-relevant patches are considered. To enhance the clinical relevance of our prediction, we propose a register-based mixture density network to forecast the survival probability distribution for individual patients. We evaluate SCMIL on two public WSI datasets from The Cancer Genome Atlas (TCGA), specifically focusing on lung adenocarcinoma (LUAD) and kidney renal clear cell carcinoma (KIRC). Our experimental results indicate that SCMIL outperforms current state-of-the-art methods for survival prediction, offering more clinically meaningful and interpretable outcomes.
Our code is accessible at https:\/\/github.com\/yang-ze-kang\/SCMIL.", "title":"SCMIL: Sparse Context-aware Multiple Instance Learning for Predicting Cancer Survival Probability Distribution in Whole Slide Images", "authors":[ "Yang, Zekang", "Liu, Hong", "Wang, Xiangdong" ], "id":"Conference", "arxiv_id":"2407.00664", "GitHub":[ "https:\/\/github.com\/yang-ze-kang\/SCMIL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":360 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0670_paper.pdf", "bibtext":"@InProceedings{ Yan_CoarseGrained_MICCAI2024,\n author = { Yan, Yige and Cheng, Jun and Yang, Xulei and Gu, Zaiwang and Leng, Shuang and Tan, Ru San and Zhong, Liang and Rajapakse, Jagath C. },\n title = { { Coarse-Grained Mask Regularization for Microvascular Obstruction Identification from non-contrast Cardiac Magnetic Resonance } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Identification of microvascular obstruction (MVO) in acute myocardial infarction patients is critical for prognosis and has a direct link to mortality risk. Current approaches using late gadolinium enhancement (LGE) for contrast-enhanced cardiac magnetic resonance (CMR) pose risks to the kidney and may not be applicable to many patients. This highlights the need to explore alternative non-contrast imaging methods, such as cine CMR, for MVO identification. However, the scarcity of datasets and the challenges in annotation make the MVO identification in cine CMR challenging and remain largely under-explored. For this purpose, we propose a non-contrast MVO identification framework in cine CMR with a novel coarse-grained mask regularization strategy to better utilize information from LGE annotations in training. We train and test our model on a dataset comprising 680 cases. Our model demonstrates superior performance over competing methods in cine CMR-based MVO identification, proving its feasibility and presenting a novel and patient-friendly approach to the field. The code is available at https:\/\/github.com\/code-koukai\/MVO-identification.", "title":"Coarse-Grained Mask Regularization for Microvascular Obstruction Identification from non-contrast Cardiac Magnetic Resonance", "authors":[ "Yan, Yige", "Cheng, Jun", "Yang, Xulei", "Gu, Zaiwang", "Leng, Shuang", "Tan, Ru San", "Zhong, Liang", "Rajapakse, Jagath C." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/code-koukai\/MVO-identification" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":361 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1538_paper.pdf", "bibtext":"@InProceedings{ Gao_MBANet_MICCAI2024,\n author = { Gao, Yifan and Xia, Wei and Wang, Wenkui and Gao, Xin },\n title = { { MBA-Net: SAM-driven Bidirectional Aggregation Network for Ovarian Tumor Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate segmentation of ovarian tumors from medical images is crucial for early diagnosis, treatment planning, and patient management. However, the diverse morphological characteristics and heterogeneous appearances of ovarian tumors pose significant challenges to automated segmentation methods. In this paper, we propose MBA-Net, a novel architecture that integrates the powerful segmentation capabilities of the Segment Anything Model (SAM) with domain-specific knowledge for accurate and robust ovarian tumor segmentation. MBA-Net employs a hybrid encoder architecture, where the encoder consists of a prior branch, which inherits the SAM encoder to capture robust segmentation priors, and a domain branch, specifically designed to extract domain-specific features. The bidirectional flow of information between the two branches is facilitated by the robust feature injection network (RFIN) and the domain knowledge integration network (DKIN), enabling MBA-Net to leverage the complementary strengths of both branches. We extensively evaluate MBA-Net on the public multi-modality ovarian tumor ultrasound dataset and the in-house multi-site ovarian tumor MRI dataset. Our proposed method consistently outperforms state-of-the-art segmentation approaches. Moreover, MBA-Net demonstrates superior generalization capability across different imaging modalities and clinical sites.", "title":"MBA-Net: SAM-driven Bidirectional Aggregation Network for Ovarian Tumor Segmentation", "authors":[ "Gao, Yifan", "Xia, Wei", "Wang, Wenkui", "Gao, Xin" ], "id":"Conference", "arxiv_id":"2407.05984", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":362 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1354_paper.pdf", "bibtext":"@InProceedings{ Fan_PathMamba_MICCAI2024,\n author = { Fan, Jiansong and Lv, Tianxu and Di, Yicheng and Li, Lihua and Pan, Xiang },\n title = { { PathMamba: Weakly Supervised State Space Model for Multi-class Segmentation of Pathology Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate segmentation of pathology images plays a crucial role in digital pathology workflow. 
Fully supervised models have achieved excellent performance through dense pixel-level annotation. However, annotation on gigapixel pathology images is extremely expensive and time-consuming. Recently, the state space model with efficient hardware-aware design, known as Mamba, has achieved impressive results. In this paper, we propose a weakly supervised state space model (PathMamba) for multi-class segmentation of pathology images using only image-level labels. Our method integrates the standard features of both pixel-level and patch-level pathology images and can generate more regionally consistent segmentation results. Specifically, we first extract pixel-level feature maps based on Multi-Instance Multi-Label Learning by treating pixels as instances, which are subsequently injected into our designed Contrastive Mamba Block. The Contrastive Mamba Block adopts a state space model and integrates the concept of contrastive learning to extract non-causal dual-granularity features in pathological images. In addition, we suggest a Deep Contrast Supervised Loss to fully utilize the limited annotated information in weakly supervised methods. Our approach facilitates a comprehensive feature learning process and captures complex details and broader global contextual semantics in pathology images. Experiments on two public pathology image datasets show that the proposed method performs better than state-of-the-art weakly supervised methods. The code is available at https:\/\/github.com\/hemo0826\/PathMamba.", "title":"PathMamba: Weakly Supervised State Space Model for Multi-class Segmentation of Pathology Images", "authors":[ "Fan, Jiansong", "Lv, Tianxu", "Di, Yicheng", "Li, Lihua", "Pan, Xiang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/hemo0826\/PathMamba" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":363 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3214_paper.pdf", "bibtext":"@InProceedings{ Wan_Doubletier_MICCAI2024,\n author = { Wang, Mingkang and Wang, Tong and Cong, Fengyu and Lu, Cheng and Xu, Hongming },\n title = { { Double-tier Attention based Multi-label Learning Network for Predicting Biomarkers from Whole Slide Images of Breast Cancer } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Hematoxylin and eosin (H&E) staining offers the advantages of low cost and high stability, effectively revealing the morphological structure of the nucleus and tissue. Predicting the expression levels of estrogen receptor (ER), progesterone receptor (PR), and human epidermal growth factor receptor 2 (HER2) from H&E stained slides is crucial for reducing the detection cost of the immunohistochemistry (IHC) method and tailoring the treatment of breast cancer patients. However, this task faces significant challenges due to the scarcity of large-scale and well-annotated datasets. In this paper, we propose a double-tier attention based multi-label learning network, termed as DAMLN, for simultaneous prediction of ER, PR, and HER2 from H&E stained WSIs. 
Our DAMLN considers slides and their tissue tiles as bags and instances under a multiple instance learning (MIL) setting. First, the instances are encoded via a pretrained CTransPath model and randomly divided into a set of pseudo bags. Pseudo-bag guided learning via cascading the multi-head self-attention (MSA) and linear MSA blocks is then conducted to generate pseudo-bag level representations. Finally, attention-pooling is applied to class tokens of pseudo bags to generate multiple biomarker predictions. Our experiments conducted on large-scale datasets with over 3000 patients demonstrate great improvements over comparative MIL models.", "title":"Double-tier Attention based Multi-label Learning Network for Predicting Biomarkers from Whole Slide Images of Breast Cancer", "authors":[ "Wang, Mingkang", "Wang, Tong", "Cong, Fengyu", "Lu, Cheng", "Xu, Hongming" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/PerrySkywalker\/DAMLN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":364 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1977_paper.pdf", "bibtext":"@InProceedings{ Han_On_MICCAI2024,\n author = { Han, Tianyu and Nebelung, Sven and Khader, Firas and Kather, Jakob Nikolas and Truhn, Daniel },\n title = { { On Instabilities of Unsupervised Denoising Diffusion Models in Magnetic Resonance Imaging Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Denoising diffusion models offer a promising approach to accelerating magnetic resonance imaging (MRI) and producing diagnostic-level images in an unsupervised manner. However, our study demonstrates that even tiny worst-case potential perturbations transferred from a surrogate model can cause these models to generate fake tissue structures that may mislead clinicians. The transferability of such worst-case perturbations indicates that the robustness of image reconstruction may be compromised due to MR system imperfections or other sources of noise. Moreover, at larger perturbation strengths, diffusion models exhibit Gaussian noise-like artifacts that are distinct from those observed in supervised models and are more challenging to detect. 
Our results highlight the vulnerability of current state-of-the-art diffusion-based reconstruction models to possible worst-case perturbations and underscore the need for further research to improve their robustness and reliability in clinical settings.", "title":"On Instabilities of Unsupervised Denoising Diffusion Models in Magnetic Resonance Imaging Reconstruction", "authors":[ "Han, Tianyu", "Nebelung, Sven", "Khader, Firas", "Kather, Jakob Nikolas", "Truhn, Daniel" ], "id":"Conference", "arxiv_id":"2406.16983", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":365 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3117_paper.pdf", "bibtext":"@InProceedings{ Han_BAPLe_MICCAI2024,\n author = { Hanif, Asif and Shamshad, Fahad and Awais, Muhammad and Naseer, Muzammal and Shahbaz Khan, Fahad and Nandakumar, Karthik and Khan, Salman and Anwer, Rao Muhammad },\n title = { { BAPLe: Backdoor Attacks on Medical Foundational Models using Prompt Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical foundation models are gaining prominence in the medical community for their ability to derive general representations from extensive collections of medical image-text pairs. Recent research indicates that these models are susceptible to backdoor attacks, which allow them to classify clean images accurately but fail when specific triggers are introduced. However, traditional backdoor attacks necessitate a considerable amount of additional data to maliciously pre-train a model. This requirement is often impractical in medical imaging applications due to the usual scarcity of data. Inspired by the latest developments in learnable prompts, this work introduces a method to embed a backdoor into the medical foundation model during the prompt learning phase. By incorporating learnable prompts within the text encoder and introducing imperceptible learnable noise trigger to the input images, we exploit the full capabilities of the medical foundation models (Med-FM). Our method requires only a minimal subset of data to adjust the text prompts for downstream tasks, enabling the creation of an effective backdoor attack. Through extensive experiments with four medical foundation models, each pre-trained on different modalities and evaluated across six downstream datasets, we demonstrate the efficacy of our approach. 
Code is available at https:\/\/github.com\/asif-hanif\/baple", "title":"BAPLe: Backdoor Attacks on Medical Foundational Models using Prompt Learning", "authors":[ "Hanif, Asif", "Shamshad, Fahad", "Awais, Muhammad", "Naseer, Muzammal", "Shahbaz Khan, Fahad", "Nandakumar, Karthik", "Khan, Salman", "Anwer, Rao Muhammad" ], "id":"Conference", "arxiv_id":"2408.07440", "GitHub":[ "https:\/\/github.com\/asif-hanif\/baple" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":366 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2456_paper.pdf", "bibtext":"@InProceedings{ She_FastSAM3D_MICCAI2024,\n author = { Shen, Yiqing and Li, Jingxing and Shao, Xinyuan and Inigo Romillo, Blanca and Jindal, Ankush and Dreizin, David and Unberath, Mathias },\n title = { { FastSAM3D: An Efficient Segment Anything Model for 3D Volumetric Medical Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Segment anything models (SAMs) are gaining attention for their zero-shot generalization capability in segmenting objects of unseen classes and in unseen domains when properly prompted. Interactivity is a key strength of SAMs, allowing users to iteratively provide prompts that specify objects of interest to refine outputs. However, to realize the interactive use of SAMs for 3D medical imaging tasks, rapid inference times are necessary. High memory requirements and long processing delays remain constraints that hinder the adoption of SAMs for this purpose. Specifically, while 2D SAMs applied to 3D volumes contend with repetitive computation to process all slices independently, 3D SAMs suffer from an exponential increase in model parameters and FLOPS. To address these challenges, we present FastSAM3D which accelerates SAM inference to 8 milliseconds per 128\u00d7128\u00d7128 3D volumetric image on an NVIDIA A100 GPU. This speedup is accomplished through 1) a novel layer-wise progressive distillation scheme that enables knowledge transfer from a complex 12-layer ViT-B to a lightweight 6-layer ViT-Tiny variant encoder without training from scratch; and 2) a novel 3D sparse flash attention to replace vanilla attention operators, substantially reducing memory needs and improving parallelization. Experiments on three diverse datasets reveal that FastSAM3D achieves a remarkable speedup of 527.38\u00d7 compared to 2D SAMs and 8.75\u00d7 compared to 3D SAMs on the same volumes without significant performance decline. Thus, FastSAM3D opens the door for low-cost truly interactive SAM-based 3D medical imaging segmentation with commonly used GPU hardware. 
Code is available at https:\/\/anonymous.4open.science\/r\/FastSAM3D-v1", "title":"FastSAM3D: An Efficient Segment Anything Model for 3D Volumetric Medical Images", "authors":[ "Shen, Yiqing", "Li, Jingxing", "Shao, Xinyuan", "Inigo Romillo, Blanca", "Jindal, Ankush", "Dreizin, David", "Unberath, Mathias" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/arcadelab\/FastSAM3D" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":367 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3922_paper.pdf", "bibtext":"@InProceedings{ Tan_RDDNet_MICCAI2024,\n author = { Tang, Yilin and Zhang, Min and Feng, Jun },\n title = { { RDD-Net: Randomized Joint Data-Feature Augmentation and Deep-Shallow Feature Fusion Networks for Automated Diagnosis of Glaucoma } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Glaucoma is an irreversible eye disease that has become the leading cause of human blindness worldwide. In recent years, deep learning has shown great potential for computer-aided diagnosis in clinics. However, the diversity in medical image quality and acquisition devices leads to distribution shifts that compromise the generalization performance of deep learning methods. To address this issue, many methods have relied on deep feature learning combined with data-level or feature-level augmentation. However, these methods suffer from a limited search space of feature styles. Previous research indicated that introducing a diverse set of augmentations and domain randomization during training can expand the search space of feature styles. In this paper, we propose a Randomized joint Data-feature augmentation and Deep-shallow feature fusion method for automated diagnosis of glaucoma (RDD-Net). It consists of three main components: Data\/Feature-level Augmentation (DFA), Explicit\/Implicit augmentation (EI), and Deep-Shallow feature fusion (DS). DFA randomly selects data\/feature-level augmentation statistics from a uniform distribution. EI involves both explicit augmentation, perturbing the style of the source domain data, and implicit augmentation, utilizing moments information. The randomized selection of different augmentation strategies broadens the diversity of feature styles. DS integrates deep-shallow features within the backbone. Extensive experiments have shown that RDD-Net achieves state-of-the-art effectiveness and generalization ability. 
The code is available at https:\/\/github.com\/TangYilin610\/RDD-Net.", "title":"RDD-Net: Randomized Joint Data-Feature Augmentation and Deep-Shallow Feature Fusion Networks for Automated Diagnosis of Glaucoma", "authors":[ "Tang, Yilin", "Zhang, Min", "Feng, Jun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/TangYilin610\/RDD-Net" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":368 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1503_paper.pdf", "bibtext":"@InProceedings{ Suv_Multimodal_MICCAI2024,\n author = { Suvon, Mohammod N. I. and Tripathi, Prasun C. and Fan, Wenrui and Zhou, Shuo and Liu, Xianyuan and Alabed, Samer and Osmani, Venet and Swift, Andrew J. and Chen, Chen and Lu, Haiping },\n title = { { Multimodal Variational Autoencoder for Low-cost Cardiac Hemodynamics Instability Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advancements in non-invasive detection of Cardiac Hemodynamic Instability (CHDI) primarily focus on applying machine learning techniques to a single data modality, e.g. cardiac magnetic resonance imaging (MRI). Despite their potential, these approaches often fall short especially when the size of labeled patient data is limited, a common challenge in the medical domain. Furthermore, only a few studies have explored multimodal methods to study CHDI, which mostly rely on costly modalities such as cardiac MRI and echocardiogram. In response to these limitations, we propose a novel multimodal variational autoencoder (CardioVAE_X, G) to integrate low-cost chest X-ray (CXR) and electrocardiogram (ECG) modalities with pre-training on a large unlabeled dataset. Specifically, CardioVAE_X, G introduces a novel tri-stream pre-training strategy to learn both shared and modality-specific features, thus enabling fine-tuning with both unimodal and multimodal datasets. We pre-train CardioVAE_X, G on a large, unlabeled dataset of 50,982 subjects from a subset of MIMIC database and then fine-tune the pre-trained model on a labeled dataset of 795 subjects from the ASPIRE registry. Comprehensive evaluations against existing methods show that CardioVAE_X, G offers promising performance (AUROC = 0.79 and Accuracy = 0.77), representing a significant step forward in non-invasive prediction of CHDI. Our model also excels in producing fine interpretations of predictions directly associated with clinical features, thereby supporting clinical decision-making.", "title":"Multimodal Variational Autoencoder for Low-cost Cardiac Hemodynamics Instability Detection", "authors":[ "Suvon, Mohammod N. 
I.", "Tripathi, Prasun C.", "Fan, Wenrui", "Zhou, Shuo", "Liu, Xianyuan", "Alabed, Samer", "Osmani, Venet", "Swift, Andrew J.", "Chen, Chen", "Lu, Haiping" ], "id":"Conference", "arxiv_id":"2403.13658", "GitHub":[ "https:\/\/github.com\/Shef-AIRE\/AI4Cardiothoracic-CardioVAE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":369 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2097_paper.pdf", "bibtext":"@InProceedings{ Zha_MAdapter_MICCAI2024,\n author = { Zhang, Xu and Ni, Bo and Yang, Yang and Zhang, Lefei },\n title = { { MAdapter: A Better Interaction between Image and Language for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Conventional medical image segmentation methods are only based on images, implying a requirement for adequate high-quality labeled images. Text-guided segmentation methods have been widely regarded as a solution to break the performance bottleneck. In this study, we introduce a bidirectional Medical Adaptor (MAdapter) where visual and linguistic features extracted from pre-trained dual encoders undergo interactive fusion. Additionally, a specialized decoder is designed to further align the fusion representation and global textual representation. Besides, We extend the endoscopic polyp datasets with clinical-oriented text annotations, following the guidance of medical professionals. Extensive experiments conducted on both the extended endoscopic polyp dataset and additional lung infection datasets demonstrate the superiority of our method.", "title":"MAdapter: A Better Interaction between Image and Language for Medical Image Segmentation", "authors":[ "Zhang, Xu", "Ni, Bo", "Yang, Yang", "Zhang, Lefei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/XShadow22\/MAdapter" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":370 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0551_paper.pdf", "bibtext":"@InProceedings{ Aze_Deep_MICCAI2024,\n author = { Azevedo, Caio and Santra, Sanchayan and Kumawat, Sudhakar and Nagahara, Hajime and Morooka, Ken'ichi },\n title = { { Deep Volume Reconstruction from Multi-focus Microscopic Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Reconstructing 3D volumes from optical microscopic images\nis useful in important areas such as cellular analysis, cancer research, and drug development. However, existing techniques either require specialized hardware or extensive sample preprocessing. 
Recently, Yamaguchi et al. proposed to solve this problem by just using a single stack of optical microscopic images with different focus settings and reconstructing a voxel-based representation of the observation using the classical iterative optimization method. Inspired by this result, this work aims to explore this method further using new state-of-the-art optimization techniques such as Deep Image Prior (DIP). Our analysis showcases the superiority of this approach over Yamaguchi et al. in reconstruction quality, hard metrics, and robustness to noise on the synthetic data. Finally, we also demonstrate the effectiveness of our approach on real data, producing excellent reconstruction quality. Code available at: https:\/\/github.com\/caiocj1\/multifocus-3d-reconstruction.", "title":"Deep Volume Reconstruction from Multi-focus Microscopic Images", "authors":[ "Azevedo, Caio", "Santra, Sanchayan", "Kumawat, Sudhakar", "Nagahara, Hajime", "Morooka, Ken'ichi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/caiocj1\/multifocus-3d-reconstruction" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":371 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1838_paper.pdf", "bibtext":"@InProceedings{ Gun_Online_MICCAI2024,\n author = { Gunnarsson, Niklas and Sj\u00f6lund, Jens and Kimstrand, Peter and Sch\u00f6n, Thomas B. },\n title = { { Online learning in motion modeling for intra-interventional image sequences } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Image monitoring and guidance during medical examinations can aid both diagnosis and treatment. However, the sampling frequency is often too low, which creates a need to estimate the missing images. We present a probabilistic motion model for sequential medical images, with the ability to both estimate motion between acquired images and forecast the motion ahead of time. The core is a low-dimensional temporal process based on a linear Gaussian state-space model with analytically tractable solutions for forecasting, simulation, and imputation of missing samples. The results, from two experiments on publicly available cardiac datasets, show reliable motion estimates and an improved forecasting performance using patient-specific adaptation by online learning.", "title":"Online learning in motion modeling for intra-interventional image sequences", "authors":[ "Gunnarsson, Niklas", "Sj\u00f6lund, Jens", "Kimstrand, Peter", "Sch\u00f6n, Thomas B." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ngunnar\/2D_motion_model" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":372 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2012_paper.pdf", "bibtext":"@InProceedings{ Rei_Unsupervised_MICCAI2024,\n author = { Reisenb\u00fcchler, Daniel and Luttner, Lucas and Schaadt, Nadine S. 
and Feuerhake, Friedrich and Merhof, Dorit },\n title = { { Unsupervised Latent Stain Adaptation for Computational Pathology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"In computational pathology, deep learning (DL) models for tasks such as segmentation or tissue classification are known to suffer from domain shifts due to different staining techniques. Stain adaptation aims to reduce the generalization error between different stains by training a model on source stains that generalizes to target stains. Despite the abundance of target stain data, a key challenge is the lack of annotations. To address this, we propose a joint training between artificially labeled and unlabeled data including all available stained images called Unsupervised Latent Stain Adaptation (ULSA). Our method uses stain translation to enrich labeled source images with synthetic target images in order to increase the supervised signals. Moreover, we leverage unlabeled target stain images using stain-invariant feature consistency learning. With ULSA we present a semi-supervised strategy for efficient stain adaptation without access to annotated target stain data. Remarkably, ULSA is task agnostic in patch-level analysis for whole slide images (WSIs). Through extensive evaluation on external datasets, we demonstrate that ULSA achieves state-of-the-art (SOTA) performance in kidney tissue segmentation and breast cancer classification across a spectrum of staining variations. Our findings suggest that ULSA is an important framework for stain adaptation in computational pathology.", "title":"Unsupervised Latent Stain Adaptation for Computational Pathology", "authors":[ "Reisenb\u00fcchler, Daniel", "Luttner, Lucas", "Schaadt, Nadine S.", "Feuerhake, Friedrich", "Merhof, Dorit" ], "id":"Conference", "arxiv_id":"2406.19081", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":373 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0383_paper.pdf", "bibtext":"@InProceedings{ Zha_Diseaseinformed_MICCAI2024,\n author = { Zhang, Jiajin and Wang, Ge and Kalra, Mannudeep K. and Yan, Pingkun },\n title = { { Disease-informed Adaptation of Vision-Language Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"In medical image analysis, the expertise scarcity and the high cost of data annotation limits the development of large artificial intelligence models. This paper investigates the potential of transfer learning with pre-trained vision-language models (VLMs) in this domain. Currently, VLMs still struggle to transfer to the underrepresented diseases with minimal presence and new diseases entirely absent from the pre-training dataset. We argue that effective adaptation of VLMs hinges on the nuanced representation learning of disease concepts. 
By capitalizing on the joint visual-linguistic capabilities of VLMs, we introduce disease-informed contextual prompting in a novel disease prototype learning framework. This approach enables VLMs to grasp the concepts of new diseases effectively and efficiently, even with limited data. Extensive experiments across multiple image modalities showcase notable enhancements in performance compared to existing techniques.", "title":"Disease-informed Adaptation of Vision-Language Models", "authors":[ "Zhang, Jiajin", "Wang, Ge", "Kalra, Mannudeep K.", "Yan, Pingkun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/RPIDIAL\/Disease-informed-VLM-Adaptation" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":374 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0321_paper.pdf", "bibtext":"@InProceedings{ Mia_Cross_MICCAI2024,\n author = { Miao, Juzheng and Chen, Cheng and Zhang, Keli and Chuai, Jie and Li, Quanzheng and Heng, Pheng-Ann },\n title = { { Cross Prompting Consistency with Segment Anything Model for Semi-supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised learning (SSL) has achieved notable progress in medical image segmentation. To achieve effective SSL, a model needs to be able to efficiently learn from limited labeled data and effectively exploit knowledge from abundant unlabeled data. Recent developments in visual foundation models, such as the Segment Anything Model (SAM), have demonstrated remarkable adaptability with improved sample efficiency. To harness the power of foundation models for application in SSL, we propose a cross prompting consistency method with segment anything model (CPC-SAM) for semi-supervised medical image segmentation. Our method employs SAM\u2019s unique prompt design and innovates a cross-prompting strategy within a dual-branch framework to automatically generate prompts and supervisions across two decoder branches, enabling effective learning from both scarce labeled and valuable unlabeled data. We further design a novel prompt consistency regularization to reduce the prompt position sensitivity and to enhance the output invariance under different prompts. We validate our method on two medical image segmentation tasks. 
The extensive experiments with different labeled-data ratios and modalities demonstrate the superiority of our proposed method over the state-of-the-art SSL methods, with more than 9% Dice improvement on the breast cancer segmentation task.", "title":"Cross Prompting Consistency with Segment Anything Model for Semi-supervised Medical Image Segmentation", "authors":[ "Miao, Juzheng", "Chen, Cheng", "Zhang, Keli", "Chuai, Jie", "Li, Quanzheng", "Heng, Pheng-Ann" ], "id":"Conference", "arxiv_id":"2407.05416", "GitHub":[ "https:\/\/github.com\/JuzhengMiao\/CPC-SAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":375 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3035_paper.pdf", "bibtext":"@InProceedings{ Wan_An_MICCAI2024,\n author = { Wang, Xiaocheng and Mekbib, D. B. and Zhou, Tian and Zhu, Junming and Zhang, Li and Cheng, Ruidong and Zhang, Jianmin and Ye, Xiangming and Xu, Dongrong },\n title = { { An MR-Compatible Virtual Reality System for Assessing Neuronal Plasticity of Sensorimotor Neurons and Mirror Neurons } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Virtual reality (VR) assisted rehabilitation system is being used more commonly in supplementing upper extremities (UE) functional rehabilitation. Mirror therapy (MT) is reportedly a useful training in encouraging motor functional recovery. However, the majority of current systems are not compatible with magnetic resonance (MR) environments. Resting-state functional magnetic resonance imaging (rs-fMRI) data, for measuring neuronal recovery status, can only be collected by these systems after the participants have been done with the VR therapy. As a result, real-time observation of the brain in working status remains unattainable. To address this challenge, we developed a novel MR-compatible VR system for Assessment of UE motor functions (MR.VRA). Three different modes are provided adapting to a participant\u2019s appropriate levels of sensorimotor cortex impairment, including a unilateral-contralateral mode, a unilateral-ipsilateral mode, and a unilateral-bilateral mode. Twenty healthy subjects were recruited to validate MR.VRA for UE function rehabilitation and assessment in three fMRI tasks. The results showed that MR.VRA succeeded in conducting the fMRI tasks in the MR scanner bore while stimulating the sensorimotor neurons and mirror neurons using its embedded therapies. The findings suggested that MR.VRA may be a promising alternative for assessing neurorehabilitation of stroke patients with UE motor function impairment in MR environment, which allows inspection of direct imaging evidence of activities of neurons in the cortices related to UE motor functions.", "title":"An MR-Compatible Virtual Reality System for Assessing Neuronal Plasticity of Sensorimotor Neurons and Mirror Neurons", "authors":[ "Wang, Xiaocheng", "Mekbib, D. 
B.", "Zhou, Tian", "Zhu, Junming", "Zhang, Li", "Cheng, Ruidong", "Zhang, Jianmin", "Ye, Xiangming", "Xu, Dongrong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":376 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1908_paper.pdf", "bibtext":"@InProceedings{ Pra_3D_MICCAI2024,\n author = { Prabhakar, Chinmay and Shit, Suprosanna and Musio, Fabio and Yang, Kaiyuan and Amiranashvili, Tamaz and Paetzold, Johannes C. and Li, Hongwei Bran and Menze, Bjoern },\n title = { { 3D Vessel Graph Generation Using Denoising Diffusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Blood vessel networks, represented as 3D graphs, help predict disease biomarkers, simulate blood flow, and aid in synthetic image generation, relevant in both clinical and pre-clinical settings. However, generating realistic vessel graphs that correspond to an anatomy of interest is challenging. Previous methods aimed at generating vessel trees mostly in an autoregressive style and could not be applied to vessel graphs with cycles such as capillaries or specific anatomical structures such as the Circle of Willis. Addressing this gap, we introduce the first application of \\textit{denoising diffusion models} in 3D vessel graph generation. Our contributions include a novel, two-stage generation method that sequentially denoises node coordinates and edges. We experiment with two real-world vessel datasets, consisting of microscopic capillaries and major cerebral vessels, and demonstrate the generalizability of our method for producing diverse, novel, and anatomically plausible vessel graphs.", "title":"3D Vessel Graph Generation Using Denoising Diffusion", "authors":[ "Prabhakar, Chinmay", "Shit, Suprosanna", "Musio, Fabio", "Yang, Kaiyuan", "Amiranashvili, Tamaz", "Paetzold, Johannes C.", "Li, Hongwei Bran", "Menze, Bjoern" ], "id":"Conference", "arxiv_id":"2407.05842", "GitHub":[ "https:\/\/github.com\/chinmay5\/vessel_diffuse" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":377 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0627_paper.pdf", "bibtext":"@InProceedings{ Fuj_EgoSurgeryPhase_MICCAI2024,\n author = { Fujii, Ryo and Hatano, Masashi and Saito, Hideo and Kajita, Hiroki },\n title = { { EgoSurgery-Phase: A Dataset of Surgical Phase Recognition from Egocentric Open Surgery Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Surgical phase recognition has gained significant attention due to its potential to offer solutions to numerous demands of the modern operating room. 
However, most existing methods concentrate on minimally invasive surgery (MIS), leaving surgical phase recognition for open surgery understudied. This discrepancy is primarily attributed to the scarcity of publicly available open surgery video datasets for surgical phase recognition. To address this issue, we introduce a new egocentric open surgery video dataset for phase recognition, named EgoSurgery-Phase. This dataset comprises 15 hours of real open surgery videos spanning 9 distinct surgical phases, all captured using an egocentric camera attached to the surgeon\u2019s head. In addition to video, EgoSurgery-Phase also provides eye gaze data. To the best of our knowledge, it is the first publicly available real open surgery video dataset for surgical phase recognition. Furthermore, inspired by the notable success of masked autoencoders (MAEs) in video understanding tasks (e.g., action recognition), we propose a gaze-guided masked autoencoder (GGMAE). Considering the regions where surgeons\u2019 gaze focuses are often critical for surgical phase recognition (e.g., surgical field), in our GGMAE, the gaze information acts as an empirical semantic richness prior to guiding the masking process, promoting better attention to semantically rich spatial regions. GGMAE significantly improves the previous state-of-the-art recognition method (6.4% in Jaccard) and the masked autoencoder-based method (3.1% in Jaccard) on EgoSurgery-Phase. The dataset will be released at https:\/\/github.com\/Fujiry0\/EgoSurgery.", "title":"EgoSurgery-Phase: A Dataset of Surgical Phase Recognition from Egocentric Open Surgery Videos", "authors":[ "Fujii, Ryo", "Hatano, Masashi", "Saito, Hideo", "Kajita, Hiroki" ], "id":"Conference", "arxiv_id":"2405.19644", "GitHub":[ "https:\/\/github.com\/Fujiry0\/EgoSurgery" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":378 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0143_paper.pdf", "bibtext":"@InProceedings{ Zha_AFoundation_MICCAI2024,\n author = { Zhang, Xinru and Ou, Ni and Basaran, Berke Doga and Visentin, Marco and Qiao, Mengyun and Gu, Renyang and Ouyang, Cheng and Liu, Yaou and Matthews, Paul M. and Ye, Chuyang and Bai, Wenjia },\n title = { { A Foundation Model for Brain Lesion Segmentation with Mixture of Modality Experts } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Brain lesion segmentation plays an essential role in neurological research and diagnosis. As brain lesions can be caused by various pathological alterations, different types of brain lesions tend to manifest with different characteristics on different imaging modalities. Due to this complexity, brain lesion segmentation methods are often developed in a task-specific manner. A specific segmentation model is developed for a particular lesion type and imaging modality. However, the use of task-specific models requires predetermination of the lesion type and imaging modality, which complicates their deployment in real-world scenarios. 
In this work, we propose a universal foundation model for 3D brain lesion segmentation, which can automatically segment different types of brain lesions for input data of various imaging modalities. We formulate a novel Mixture of Modality Experts (MoME) framework with multiple expert networks attending to different imaging modalities. A hierarchical gating network combines the expert predictions and fosters expertise collaboration. Furthermore, we introduce a curriculum learning strategy during training to avoid the degeneration of each expert network and preserve their specialization. We evaluated the proposed method on nine brain lesion datasets, encompassing five imaging modalities and eight lesion types. The results show that our model outperforms state-of-the-art universal models and provides promising generalization to unseen datasets.", "title":"A Foundation Model for Brain Lesion Segmentation with Mixture of Modality Experts", "authors":[ "Zhang, Xinru", "Ou, Ni", "Basaran, Berke Doga", "Visentin, Marco", "Qiao, Mengyun", "Gu, Renyang", "Ouyang, Cheng", "Liu, Yaou", "Matthews, Paul M.", "Ye, Chuyang", "Bai, Wenjia" ], "id":"Conference", "arxiv_id":"2405.10246", "GitHub":[ "https:\/\/github.com\/ZhangxinruBIT\/MoME" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":379 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2517_paper.pdf", "bibtext":"@InProceedings{ Kim_Multimodal_MICCAI2024,\n author = { Kim, Junsik and Shi, Zhiyi and Jeong, Davin and Knittel, Johannes and Yang, Helen Y. and Song, Yonghyun and Li, Wanhua and Li, Yicong and Ben-Yosef, Dalit and Needleman, Daniel and Pfister, Hanspeter },\n title = { { Multimodal Learning for Embryo Viability Prediction in Clinical IVF } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"In clinical In-Vitro Fertilization (IVF), identifying the most viable embryo for transfer is important to increasing the likelihood of a successful pregnancy. Traditionally, this process involves embryologists manually assessing embryos\u2019 static morphological features at specific intervals using light microscopy. This manual evaluation is not only time-intensive and costly, due to the need for expert analysis, but also inherently subjective, leading to variability in the selection process. To address these challenges, we develop a multimodal model that leverages both time-lapse video data and Electronic Health Records (EHRs) to predict embryo viability. A key challenge of our research is to effectively combine time-lapse video and EHR data, given their distinct modality characteristic. We comprehensively analyze our multimodal model with various modality inputs and integration approaches. 
Our approach will enable fast and automated embryo viability predictions at scale for clinical IVF.", "title":"Multimodal Learning for Embryo Viability Prediction in Clinical IVF", "authors":[ "Kim, Junsik", "Shi, Zhiyi", "Jeong, Davin", "Knittel, Johannes", "Yang, Helen Y.", "Song, Yonghyun", "Li, Wanhua", "Li, Yicong", "Ben-Yosef, Dalit", "Needleman, Daniel", "Pfister, Hanspeter" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mibastro\/MMIVF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":380 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3005_paper.pdf", "bibtext":"@InProceedings{ Yan_Airway_MICCAI2024,\n author = { Yang, Xuan and Chen, Lingyu and Zheng, Yuchao and Ma, Longfei and Chen, Fang and Ning, Guochen and Liao, Hongen },\n title = { { Airway segmentation based on topological structure enhancement using multi-task learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Airway segmentation in chest computed tomography (CT) images is critical for tracheal disease diagnosis and surgical navigation. However, airway segmentation is challenging due to complex tree structures and branches of different sizes. To enhance airway integrity and reduce fractures during bronchus segmentation, we propose a novel network for airway segmentation, using centerline detection as an auxiliary task to enhance topology awareness. The network introduces a topology embedding interactive module to emphasize the geometric properties of tracheal connections and reduce bronchial breakage. In addition, the proposed topology-enhanced attention module captures contextual and spatial information to improve bronchiole segmentation. In this paper, we conduct qualitative and quantitative experiments on two public datasets. Compared to several state-of-the-art algorithms, our method performs better in detecting terminal bronchi and ensuring the continuity of the entire trachea while maintaining comparable segmentation accuracy. 
Our code is available at https:\/\/github.com\/xyang-11\/airway_seg.", "title":"Airway segmentation based on topological structure enhancement using multi-task learning", "authors":[ "Yang, Xuan", "Chen, Lingyu", "Zheng, Yuchao", "Ma, Longfei", "Chen, Fang", "Ning, Guochen", "Liao, Hongen" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xyang-11\/airway_seg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":381 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0375_paper.pdf", "bibtext":"@InProceedings{ Wu_MMRetinal_MICCAI2024,\n author = { Wu, Ruiqi and Zhang, Chenran and Zhang, Jianle and Zhou, Yi and Zhou, Tao and Fu, Huazhu },\n title = { { MM-Retinal: Knowledge-Enhanced Foundational Pretraining with Fundus Image-Text Expertise } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Current fundus image analysis models are predominantly built for specific tasks relying on individual datasets. The learning process is usually based on a data-driven paradigm without prior knowledge. To address this issue, we propose MM-Retinal, a multi-modal dataset that encompasses high-quality image-text pairs collected from professional fundus diagram books. Moreover, enabled by MM-Retinal, we present a novel Knowledge-enhanced foundational pretraining model which incorporates Fundus Image-Text expertise, called KeepFIT. It is designed with an image similarity-guided text revision and a mixed training strategy to infuse expert knowledge. Our proposed fundus foundation model achieves state-of-the-art performance across six unseen downstream tasks and holds excellent generalization ability in zero-shot and few-shot scenarios. 
MM-Retinal and KeepFIT are available at https:\/\/github.com\/lxirich\/MM-Retinal.", "title":"MM-Retinal: Knowledge-Enhanced Foundational Pretraining with Fundus Image-Text Expertise", "authors":[ "Wu, Ruiqi", "Zhang, Chenran", "Zhang, Jianle", "Zhou, Yi", "Zhou, Tao", "Fu, Huazhu" ], "id":"Conference", "arxiv_id":"2405.11793", "GitHub":[ "https:\/\/github.com\/lxirich\/MM-Retinal" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":382 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3658_paper.pdf", "bibtext":"@InProceedings{ Abd_ANew_MICCAI2024,\n author = { Abdelhalim, Ibrahim and Abou El-Ghar, Mohamed and Dwyer, Amy and Ouseph, Rosemary and Contractor, Sohail and El-Baz, Ayman },\n title = { { A New Non-Invasive AI-Based Diagnostic System for Automated Diagnosis of Acute Renal Rejection in Kidney Transplantation: Analysis of ADC Maps Extracted from Matched 3D Iso-Regions of the Transplanted Kidney } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Acute allograft rejection poses a significant challenge in kidney transplantation, the primary remedy for end-stage renal disease. Timely detection is crucial for intervention and graft preservation. A notable obstacle involves ensuring consistency across Diffusion Weighted Magnetic Resonance Imaging (DW-MRI) scanning protocols at various Tesla levels. To tackle this, we propose a novel, non-invasive framework for automated diagnosis of acute renal rejection using DW-MRI. Our method comprises several key steps: Initially, we register the segmented kidney across different scanners, aligning them from the cortex to the medulla. Afterwards, the Apparent Diffusion Coefficient (ADC) is estimated for the segmented kidney. Then, the ADC maps are partitioned into a 3D iso-surface from the cortex to the medulla using the fast-marching level sets method. Next, the Cumulative Distribution Function (CDF) of the ADC for each iso-surface is computed, and Spearman correlation is applied to these CDFs. Finally, we introduce a Transformer-based Correlations to Classes Converter (T3C) model to leverage these correlations for distinguishing between normal and acutely rejected transplants. Evaluation on a cohort of 94 subjects (40 with acute renal rejection and 54 control subjects) yields promising results, with a mean accuracy of 98.723%, a mean sensitivity of 97%, and a mean specificity of 100%, employing a leave-one-subject testing approach. 
These findings underscore the effectiveness and robustness of our proposed framework.", "title":"A New Non-Invasive AI-Based Diagnostic System for Automated Diagnosis of Acute Renal Rejection in Kidney Transplantation: Analysis of ADC Maps Extracted from Matched 3D Iso-Regions of the Transplanted Kidney", "authors":[ "Abdelhalim, Ibrahim", "Abou El-Ghar, Mohamed", "Dwyer, Amy", "Ouseph, Rosemary", "Contractor, Sohail", "El-Baz, Ayman" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":383 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2156_paper.pdf", "bibtext":"@InProceedings{ Li_SiFT_MICCAI2024,\n author = { Li, Xuyang and Zhang, Weizhuo and Yu, Yue and Zheng, Wei-Shi and Zhang, Tong and Wang, Ruixuan },\n title = { { SiFT: A Serial Framework with Textual Guidance for Federated Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning has been extensively used in various medical scenarios. However, the data-hungry nature of deep learning poses significant challenges in the medical domain, where data is often private, scarce, and imbalanced. Federated learning emerges as a solution to this paradox. Federated learning allows multiple data owners (i.e., clients) to collaboratively train a unified model without requiring clients to share their private data with others. In this study, we propose an innovative framework called SiFT (Serial Framework with Textual guidance) for federated learning. In our framework, the model is trained in a cyclic sequential manner inspired by the study of continual learning. In particular, with a continual learning strategy which employs a long-term model and a short-term model to emulate human\u2019s long-term and short-term memory, class knowledge across clients can be effectively accumulated through the serial learning process. In addition, a pre-trained biomedical language model is utilized to guide the training of the short-term model by embedding textual prior knowledge of each image class into the classifier head. Experimental evaluations on three public medical image datasets demonstrate that the proposed SiFT achieves superior performance with lower communication cost compared to traditional federated learning methods. 
The source code is available at https:\/\/openi.pcl.ac.cn\/OpenMedIA\/SiFT.git.", "title":"SiFT: A Serial Framework with Textual Guidance for Federated Learning", "authors":[ "Li, Xuyang", "Zhang, Weizhuo", "Yu, Yue", "Zheng, Wei-Shi", "Zhang, Tong", "Wang, Ruixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":384 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2217_paper.pdf", "bibtext":"@InProceedings{ Kuj_Label_MICCAI2024,\n author = { Kujawa, Aaron and Dorent, Reuben and Ourselin, Sebastien and Vercauteren, Tom },\n title = { { Label merge-and-split: A graph-colouring approach for memory-efficient brain parcellation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Whole brain parcellation requires inferring hundreds of segmentation labels in large image volumes and thus presents significant practical challenges for deep learning approaches. We introduce label merge-and-split, a method that first greatly reduces the effective number of labels required for learning-based whole brain parcellation and then recovers original labels. Using a greedy graph colouring algorithm, our method automatically groups and merges multiple spatially separate labels prior to model training and inference. The merged labels may be semantically unrelated. A deep learning model is trained to predict merged labels. At inference time, original labels are restored using atlas-based influence regions. In our experiments, the proposed approach reduces the number of labels by up to 68% while achieving segmentation accuracy comparable to the baseline method without label merging and splitting. Moreover, model training and inference times as well as GPU memory requirements were reduced significantly. The proposed method can be applied to all semantic segmentation tasks with a large number of spatially separate classes within an atlas-based prior.", "title":"Label merge-and-split: A graph-colouring approach for memory-efficient brain parcellation", "authors":[ "Kujawa, Aaron", "Dorent, Reuben", "Ourselin, Sebastien", "Vercauteren, Tom" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":385 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1201_paper.pdf", "bibtext":"@InProceedings{ Gha_XTranPrune_MICCAI2024,\n author = { Ghadiri, Ali and Pagnucco, Maurice and Song, Yang },\n title = { { XTranPrune: eXplainability-aware Transformer Pruning for Bias Mitigation in Dermatological Disease Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Numerous studies have demonstrated the effectiveness of deep learning models in medical image analysis. 
However, these models often exhibit performance disparities across different demographic cohorts, undermining their trustworthiness in clinical settings. While previous efforts have focused on bias mitigation techniques for traditional encoders, the increasing use of transformers in the medical domain calls for novel fairness enhancement methods. Additionally, the efficacy of explainability methods in improving model fairness remains unexplored. To address these gaps, we introduce XTranPrune, a bias mitigation method tailored for vision transformers. Leveraging state-of-the-art explainability techniques, XTranPrune generates a pruning mask to remove discriminatory modules while preserving performance-critical ones. Our experiments on two skin lesion datasets demonstrate the superior performance of XTranPrune across multiple fairness metrics. The code can be found at https:\/\/github.com\/AliGhadirii\/XTranPrune.", "title":"XTranPrune: eXplainability-aware Transformer Pruning for Bias Mitigation in Dermatological Disease Classification", "authors":[ "Ghadiri, Ali", "Pagnucco, Maurice", "Song, Yang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/AliGhadirii\/XTranPrune" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":386 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2832_paper.pdf", "bibtext":"@InProceedings{ Zhe_Curriculum_MICCAI2024,\n author = { Zheng, Xiuqi and Zhang, Yuhang and Zhang, Haoran and Liang, Hongrui and Bao, Xueqi and Jiang, Zhuqing and Lao, Qicheng },\n title = { { Curriculum Prompting Foundation Models for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Adapting large pre-trained foundation models, e.g., SAM, for medical image segmentation remains a significant challenge. A crucial step involves the formulation of a series of specialized prompts that incorporate specific clinical instructions. Past works have been heavily reliant on a singular type of prompt for each instance, necessitating manual input of an ideally correct prompt, which is less efficient. To tackle this issue, we propose to utilize prompts of different granularity, which are sourced from original images to provide a broader scope of clinical insights. However, combining prompts of varying types can pose a challenge due to potential conflicts. In response, we have designed a coarse-to-fine mechanism, referred to as curriculum prompting, that progressively integrates prompts of different types. Through extensive experiments on three public medical datasets across various modalities, we demonstrate the effectiveness of our proposed approach, which not only automates the prompt generation process but also yields superior performance compared to other SAM-based medical image segmentation methods. 
Code will be available at: https:\/\/github.com\/AnnaZzz-zxq\/Curriculum-Prompting.", "title":"Curriculum Prompting Foundation Models for Medical Image Segmentation", "authors":[ "Zheng, Xiuqi", "Zhang, Yuhang", "Zhang, Haoran", "Liang, Hongrui", "Bao, Xueqi", "Jiang, Zhuqing", "Lao, Qicheng" ], "id":"Conference", "arxiv_id":"2409.00695", "GitHub":[ "https:\/\/github.com\/AnnaZzz-zxq\/Curriculum-Prompting" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":387 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1682_paper.pdf", "bibtext":"@InProceedings{ Pan_HemodynamicDriven_MICCAI2024,\n author = { Pan, Xiang and Nie, Shiyun and Lv, Tianxu and Li, Lihua },\n title = { { Hemodynamic-Driven Multi-Prototypes Learning for One-Shot Segmentation in Breast Cancer DCE-MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"In dynamic contrast-enhanced magnetic resonance imaging (DCE-MRI) of the breast, tumor segmentation is pivotal in screening and prognostic evaluation. However, automated segmentation is typically limited by a large amount of fully annotated data, and the multi-connected regions and complicated contours of tumors also pose a significant challenge. Existing few-shot segmentation methods tend to overfit the targets of base categories, resulting in inaccurate segmentation boundaries. In this work, we propose a hemodynamic-driven multi-prototypes network (HDMPNet) for one-shot segmentation that generates high-quality segmentation maps even for tumors of variable size, appearance, and shape. Specifically, a parameter-free module, called adaptive superpixel clustering (ASC), is designed to extract multi-prototypes by aggregating similar feature vectors for the multi-connected regions. Moreover, we develop a cross-fusion decoder (CFD) for optimizing boundary segmentation, which involves reweighting and aggregating support and query features. Besides, a bidirectional Gate Recurrent Unit is employed to acquire pharmacokinetic knowledge, subsequently driving the ASC and CFD modules. Experiments on two public breast cancer datasets show that our method yields higher segmentation performance than the existing state-of-the-art methods. 
The source code will be available on https:\/\/github.com\/Medical-AI-Lab-of-JNU\/HDMP.", "title":"Hemodynamic-Driven Multi-Prototypes Learning for One-Shot Segmentation in Breast Cancer DCE-MRI", "authors":[ "Pan, Xiang", "Nie, Shiyun", "Lv, Tianxu", "Li, Lihua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Medical-AI-Lab-of-JNU\/HDMP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":388 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3246_paper.pdf", "bibtext":"@InProceedings{ Emr_Learning_MICCAI2024,\n author = { Emre, Taha and Chakravarty, Arunava and Lachinov, Dmitrii and Rivail, Antoine and Schmidt-Erfurth, Ursula and Bogunovi\u0107, Hrvoje },\n title = { { Learning Temporally Equivariance for Degenerative Disease Progression in OCT by Predicting Future Representations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Contrastive pretraining provides robust representations by ensuring their invariance to different image transformations while simultaneously preventing representational collapse. Equivariant contrastive learning, on the other hand, provides representations sensitive to specific image transformations while remaining invariant to others. By introducing equivariance to time-induced transformations, such as disease-related anatomical changes in longitudinal imaging, the model can effectively capture such changes in the representation space. In this work, we propose a Time-equivariant Contrastive Learning (TC) method. First, an encoder embeds two unlabeled scans from different time points of the same patient into the representation space. Next, a temporal equivariance module is trained to predict the representation of a later visit based on the representation from one of the previous visits and the corresponding time interval with a novel regularization loss term while preserving the invariance property to irrelevant image transformations. 
On a large longitudinal dataset, our model clearly outperforms existing equivariant contrastive methods in predicting progression from intermediate age-related macular degeneration (AMD) to advanced wet-AMD within a specified time-window.", "title":"Learning Temporally Equivariance for Degenerative Disease Progression in OCT by Predicting Future Representations", "authors":[ "Emre, Taha", "Chakravarty, Arunava", "Lachinov, Dmitrii", "Rivail, Antoine", "Schmidt-Erfurth, Ursula", "Bogunovi\u0107, Hrvoje" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/EmreTaha\/TC-time_equivariant_disease_progression" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":389 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2374_paper.pdf", "bibtext":"@InProceedings{ Zha_PhyDiff_MICCAI2024,\n author = { Zhang, Juanhua and Yan, Ruodan and Perelli, Alessandro and Chen, Xi and Li, Chao },\n title = { { Phy-Diff: Physics-guided Hourglass Diffusion Model for Diffusion MRI Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion MRI (dMRI) is an important neuroimaging technique with high acquisition costs. Deep learning approaches have been used to enhance dMRI and predict diffusion biomarkers through undersampled dMRI. To generate more comprehensive raw dMRI, generative adversarial network based methods are proposed to include b-values and b-vectors as conditions, but they are limited by unstable training and less desirable diversity. The emerging diffusion model (DM) promises to improve generative performance. However, it remains challenging to include essential information in conditioning DM for more relevant generation, i.e., the physical principles of dMRI and white matter tract structures. In this study, we propose a physics-guided diffusion model to generate high-quality dMRI. Our model introduces the physical principles of dMRI in the noise evolution in the diffusion process and introduces a query-based conditional mapping within the diffusion model. In addition, to enhance the anatomical fine details of the generation, we introduce the XTRACT atlas as a prior of white matter tracts by adopting an adapter technique. Our experimental results show that our method outperforms other state-of-the-art methods and has the potential to advance dMRI enhancement.", "title":"Phy-Diff: Physics-guided Hourglass Diffusion Model for Diffusion MRI Synthesis", "authors":[ "Zhang, Juanhua", "Yan, Ruodan", "Perelli, Alessandro", "Chen, Xi", "Li, Chao" ], "id":"Conference", "arxiv_id":"2406.03002", "GitHub":[ "https:\/\/github.com\/Caewinix\/Phy-Diff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":390 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0532_paper.pdf", "bibtext":"@InProceedings{ Zha_CryoSAM_MICCAI2024,\n author = { Zhao, Yizhou and Bian, Hengwei and Mu, Michael and Uddin, Mostofa R. 
and Li, Zhenyang and Li, Xiang and Wang, Tianyang and Xu, Min },\n title = { { CryoSAM: Training-free CryoET Tomogram Segmentation with Foundation Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cryogenic Electron Tomography (CryoET) is a useful imaging technology in structural biology that is hindered by its need for manual annotations, especially in particle picking. Recent works have endeavored to remedy this issue with few-shot learning or contrastive learning techniques. However, supervised training is still inevitable for them. We instead choose to leverage the power of existing 2D foundation models and present a novel, training-free framework, CryoSAM. In addition to prompt-based single-particle instance segmentation, our approach can automatically search for similar features, facilitating full tomogram semantic segmentation with only one prompt. CryoSAM is composed of two major parts: 1) a prompt-based 3D segmentation system that uses prompts to complete single-particle instance segmentation recursively with Cross-Plane Self-Prompting, and 2) a Hierarchical Feature Matching mechanism that efficiently matches relevant features with extracted tomogram features. They collaborate to enable the segmentation of all particles of one category with just one particle-specific prompt. Our experiments show that CryoSAM outperforms existing works by a significant margin and requires even fewer annotations in particle picking. Further visualizations demonstrate its ability when dealing with full tomogram segmentation for various subcellular structures. Our code is available at: https:\/\/github.com\/xulabs\/aitom", "title":"CryoSAM: Training-free CryoET Tomogram Segmentation with Foundation Models", "authors":[ "Zhao, Yizhou", "Bian, Hengwei", "Mu, Michael", "Uddin, Mostofa R.", "Li, Zhenyang", "Li, Xiang", "Wang, Tianyang", "Xu, Min" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xulabs\/aitom" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":391 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1942_paper.pdf", "bibtext":"@InProceedings{ Yu_UrFound_MICCAI2024,\n author = { Yu, Kai and Zhou, Yang and Bai, Yang and Soh, Zhi Da and Xu, Xinxing and Goh, Rick Siow Mong and Cheng, Ching-Yu and Liu, Yong },\n title = { { UrFound: Towards Universal Retinal Foundation Models via Knowledge-Guided Masked Modeling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Retinal foundation models aim to learn generalizable representations from diverse retinal images, facilitating label-efficient model adaptation across various ophthalmic tasks. Despite their success, current retinal foundation models are generally restricted to a single imaging modality, such as Color Fundus Photography (CFP) or Optical Coherence Tomography (OCT), limiting their versatility. 
Moreover, these models may struggle to fully leverage expert annotations and overlook the valuable domain knowledge essential for domain-specific representation learning. To overcome these limitations, we introduce UrFound, a retinal foundation model designed to learn universal representations from both multimodal retinal images and domain knowledge. UrFound is equipped with a modality-agnostic image encoder and accepts either CFP or OCT images as inputs. To integrate domain knowledge into representation learning, we encode expert annotation in text supervision and propose a knowledge-guided masked modeling strategy for model pre-training. It involves reconstructing randomly masked patches of retinal images while predicting masked text tokens conditioned on the corresponding image. This approach aligns multimodal images and textual expert annotations within a unified latent space, facilitating generalizable and domain-specific representation learning. Experimental results demonstrate that UrFound exhibits strong generalization ability and data efficiency when adapting to various tasks in retinal image analysis. By training on ~180k retinal images, UrFound significantly outperforms the state-of-the-art retinal foundation model trained on up to 1.6 million unlabelled images across 8 public retinal datasets. Our code and data are available at https:\/\/github.com\/yukkai\/UrFound.", "title":"UrFound: Towards Universal Retinal Foundation Models via Knowledge-Guided Masked Modeling", "authors":[ "Yu, Kai", "Zhou, Yang", "Bai, Yang", "Soh, Zhi Da", "Xu, Xinxing", "Goh, Rick Siow Mong", "Cheng, Ching-Yu", "Liu, Yong" ], "id":"Conference", "arxiv_id":"2408.05618", "GitHub":[ "https:\/\/github.com\/yukkai\/UrFound" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":392 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2275_paper.pdf", "bibtext":"@InProceedings{ Cur_Lobar_MICCAI2024,\n author = { Curiale, Ariel H. and San Jos\u00e9 Est\u00e9par, Ra\u00fal },\n title = { { Lobar Lung Density Embeddings with a Transformer encoder (LobTe) to predict emphysema progression in COPD } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Emphysema is defined as an abnormal alveolar wall destruction that exhibits varied extent and distribution within the lung, leading to heterogeneous spatial emphysema distribution. The progression of emphysema leads to decreased gas exchange, resulting in clinical worsening, and has been associated with higher mortality. Despite the ability to diagnose emphysema on CT scans, there are no methods to predict its evolution. Our study aims to propose and validate a novel prognostic lobe-based transformer (LobTe) model capable of capturing the complexity and spatial variability of emphysema progression. This model predicts the evolution of emphysema based on %LAA-950 measurements, thereby enhancing our understanding of Chronic Obstructive Pulmonary Disease (COPD). 
LobTe is specifically tailored to address the spatial heterogeneity in lung destruction via a transformer encoder using lobe embedding fingerprints to maintain global attention according to lobes\u2019 positions. We trained and tested our model using data from 4,612 smokers, both with and without COPD, across all GOLD stages, who had complete baseline and 5-year follow-up data. Our findings from 1,830 COPDGene participants used for testing demonstrate the model\u2019s effectiveness in predicting lung density evolution based on %LAA-950, achieving a Root Mean Squared Error (RMSE) of 2.957%, a correlation coefficient (\u03c1) of 0.643 and a coefficient of determination (R2) of 0.36. The model\u2019s capability to predict changes in lung density over five years from baseline CT scans highlights its potential in the early identification of patients at risk of emphysema progression. Our results suggest that image embeddings derived from baseline CT scans effectively forecast emphysema progression by quantifying lung tissue loss.", "title":"Lobar Lung Density Embeddings with a Transformer encoder (LobTe) to predict emphysema progression in COPD", "authors":[ "Curiale, Ariel H.", "San Jos\u00e9 Est\u00e9par, Ra\u00fal" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":393 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1510_paper.pdf", "bibtext":"@InProceedings{ Par_Automated_MICCAI2024,\n author = { Park, Robin Y. and Windsor, Rhydian and Jamaludin, Amir and Zisserman, Andrew },\n title = { { Automated Spinal MRI Labelling from Reports Using a Large Language Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"We propose a general pipeline to automate the extraction of labels from radiology reports using large language models, which we validate on spinal MRI reports. The efficacy of our method is measured on two distinct conditions: spinal cancer and stenosis. Using open-source models, our method surpasses GPT-4 on a held-out set of reports. Furthermore, we show that the extracted labels can be used to train an imaging model to classify the identified conditions in the accompanying MR scans. 
Both the cancer and stenosis classifiers trained using automated labels achieve comparable performance to models trained using scans manually annotated by clinicians.", "title":"Automated Spinal MRI Labelling from Reports Using a Large Language Model", "authors":[ "Park, Robin Y.", "Windsor, Rhydian", "Jamaludin, Amir", "Zisserman, Andrew" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/robinyjpark\/AutoLabelClassifier" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":394 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0053_paper.pdf", "bibtext":"@InProceedings{ Jia_Cardiac_MICCAI2024,\n author = { Jiang, Haojun and Sun, Zhenguo and Jia, Ning and Li, Meng and Sun, Yu and Luo, Shaqi and Song, Shiji and Huang, Gao },\n title = { { Cardiac Copilot: Automatic Probe Guidance for Echocardiography with World Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Echocardiography is the only technique capable of real-time imaging of the heart and is vital for diagnosing the majority of cardiac diseases. However, there is a severe shortage of experienced cardiac sonographers, due to the heart\u2019s complex structure and significant operational challenges. To mitigate this situation, we present a Cardiac Copilot system capable of providing real-time probe movement guidance to assist less experienced sonographers in conducting freehand echocardiography. This system can enable non-experts, especially in primary departments and medically underserved areas, to perform cardiac ultrasound examinations, potentially improving global healthcare delivery. The core innovation lies in proposing a data-driven world model, named Cardiac Dreamer, for representing cardiac spatial structures. This world model can provide structure features of any cardiac planes around the current probe position in the latent space, serving as a precise navigation map for autonomous plane localization. We train our model with real-world ultrasound data and corresponding probe motion from 110 routine clinical scans with 151K sample pairs by three certified sonographers. 
Evaluations on three standard planes with 37K sample pairs demonstrate that the world model can reduce navigation errors by up to 33% and exhibit more stable performance.", "title":"Cardiac Copilot: Automatic Probe Guidance for Echocardiography with World Model", "authors":[ "Jiang, Haojun", "Sun, Zhenguo", "Jia, Ning", "Li, Meng", "Sun, Yu", "Luo, Shaqi", "Song, Shiji", "Huang, Gao" ], "id":"Conference", "arxiv_id":"2406.13165", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":395 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2609_paper.pdf", "bibtext":"@InProceedings{ Aay_Fair_MICCAI2024,\n author = { Aayushman and Gaddey, Hemanth and Mittal, Vidhi and Chawla, Manisha and Gupta, Gagan Raj },\n title = { { Fair and Accurate Skin Disease Image Classification by Alignment with Clinical Labels } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning models have achieved great success in automating skin lesion diagnosis. However, the ethnic disparity in these models\u2019 predictions needs to be addressed before deployment of these models. We introduce a novel approach: PatchAlign, to enhance skin condition image classification accuracy and fairness through alignment with clinical text representations of skin conditions. PatchAlign uses Graph Optimal Transport (\\texttt{GOT}) Loss as a regularizer to perform cross-domain alignment. The representations thus obtained are robust and generalize well across skin tones, even with limited training samples. To reduce the effect of noise\/artifacts in clinical dermatology images, we propose a learnable Masked Graph Optimal Transport for cross-domain alignment that further improves the fairness metrics.", "title":"Fair and Accurate Skin Disease Image Classification by Alignment with Clinical Labels", "authors":[ "Aayushman", "Gaddey, Hemanth", "Mittal, Vidhi", "Chawla, Manisha", "Gupta, Gagan Raj" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/aayushmanace\/PatchAlign24" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":396 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0501_paper.pdf", "bibtext":"@InProceedings{ Li_PASTA_MICCAI2024,\n author = { Li, Yitong and Yakushev, Igor and Hedderich, Dennis M. and Wachinger, Christian },\n title = { { PASTA: Pathology-Aware MRI to PET CroSs-modal TrAnslation with Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Positron emission tomography (PET) is a well-established functional imaging technique for diagnosing brain disorders. However, PET\u2019s high costs and radiation exposure limit its widespread use. In contrast, magnetic resonance imaging (MRI) does not have these limitations. 
Although it also captures neurodegenerative changes, MRI is a less sensitive diagnostic tool than PET. To close this gap, we aim to generate synthetic PET from MRI. Herewith, we introduce PASTA, a novel pathology-aware image translation framework based on conditional diffusion models. Compared to the state-of-the-art methods, PASTA excels in preserving both structural and pathological details in the target modality, which is achieved through its highly interactive dual-arm architecture and multi-modal condition integration. A cycle exchange consistency and volumetric generation strategy elevate PASTA\u2019s capability to produce high-quality 3D PET scans. Our qualitative and quantitative results confirm that the synthesized PET scans from PASTA not only reach the best quantitative scores but also preserve the pathology correctly. For Alzheimer\u2019s classification, the performance of synthesized scans improves over MRI by 4%, almost reaching the performance of actual PET. Code is available at https:\/\/github.com\/ai-med\/PASTA.", "title":"PASTA: Pathology-Aware MRI to PET CroSs-modal TrAnslation with Diffusion Models", "authors":[ "Li, Yitong", "Yakushev, Igor", "Hedderich, Dennis M.", "Wachinger, Christian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ai-med\/PASTA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":397 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1980_paper.pdf", "bibtext":"@InProceedings{ Aza_EchoTracker_MICCAI2024,\n author = { Azad, Md Abulkalam and Chernyshov, Artem and Nyberg, John and Tveten, Ingrid and Lovstakken, Lasse and Dalen, H\u00e5vard and Grenne, Bj\u00f8rnar and \u00d8stvik, Andreas },\n title = { { EchoTracker: Advancing Myocardial Point Tracking in Echocardiography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Tissue tracking in echocardiography is challenging due to the complex cardiac motion and the inherent nature of ultrasound acquisitions. Although optical flow methods are considered state-of-the-art (SOTA), they struggle with long-range tracking, noise occlusions, and drift throughout the cardiac cycle. Recently, novel learning-based point tracking techniques have been introduced to tackle some of these issues. In this paper, we build upon these techniques and introduce EchoTracker, a two-fold coarse-to-fine model that facilitates the tracking of queried points on a tissue surface across ultrasound image sequences. The architecture contains a preliminary coarse initialization of the trajectories, followed by reinforcement iterations based on fine-grained appearance changes. It is efficient, light, and can run on mid-range GPUs. Experiments demonstrate that the model outperforms SOTA methods, with an average position accuracy of 67% and a median trajectory error of 2.86 pixels. Furthermore, we show a relative improvement of 25% when using our model to calculate the global longitudinal strain (GLS) in a clinical test-retest dataset compared to other methods. 
This implies that learning-based point tracking can potentially improve performance and yield a higher diagnostic and prognostic value for clinical measurements than current techniques. Our source code is available at: https:\/\/github.com\/\/.", "title":"EchoTracker: Advancing Myocardial Point Tracking in Echocardiography", "authors":[ "Azad, Md Abulkalam", "Chernyshov, Artem", "Nyberg, John", "Tveten, Ingrid", "Lovstakken, Lasse", "Dalen, H\u00e5vard", "Grenne, Bj\u00f8rnar", "\u00d8stvik, Andreas" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/riponazad\/echotracker\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":398 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3535_paper.pdf", "bibtext":"@InProceedings{ Sul_HAMILQA_MICCAI2024,\n author = { Sultan, K. M. Arefeen and Hisham, Md Hasibul Husain and Orkild, Benjamin and Morris, Alan and Kholmovski, Eugene and Bieging, Erik and Kwan, Eugene and Ranjan, Ravi and DiBella, Ed and Elhabian, Shireen Y. },\n title = { { HAMIL-QA: Hierarchical Approach to Multiple Instance Learning for Atrial LGE MRI Quality Assessment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"The accurate evaluation of left atrial fibrosis via high-quality 3D Late Gadolinium Enhancement (LGE) MRI is crucial for atrial fibrillation management but is hindered by factors like patient movement and imaging variability. The pursuit of automated LGE MRI quality assessment is critical for enhancing diagnostic accuracy, standardizing evaluations, and improving patient outcomes. The deep learning models aimed at automating this process face significant challenges due to the scarcity of expert annotations, high computational costs, and the need to capture subtle diagnostic details in highly variable images. This study introduces HAMIL-QA, a multiple instance learning (MIL) framework, designed to overcome these obstacles. HAMIL-QA employs a hierarchical bag and sub-bag structure that allows for targeted analysis within sub-bags and aggregates insights at the volume level. This hierarchical MIL approach reduces reliance on extensive annotations, lessens computational load, and ensures clinically relevant quality predictions by focusing on diagnostically critical image features. Our experiments show that HAMIL-QA surpasses existing MIL methods and traditional supervised approaches in accuracy, AUROC, and F1-Score on an LGE MRI scan dataset, demonstrating its potential as a scalable solution for LGE MRI quality assessment automation. The code is available at: https:\/\/github.com\/arf111\/HAMIL-QA", "title":"HAMIL-QA: Hierarchical Approach to Multiple Instance Learning for Atrial LGE MRI Quality Assessment", "authors":[ "Sultan, K. M. Arefeen", "Hisham, Md Hasibul Husain", "Orkild, Benjamin", "Morris, Alan", "Kholmovski, Eugene", "Bieging, Erik", "Kwan, Eugene", "Ranjan, Ravi", "DiBella, Ed", "Elhabian, Shireen Y." 
], "id":"Conference", "arxiv_id":"2407.07254", "GitHub":[ "https:\/\/github.com\/arf111\/HAMIL-QA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":399 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0333_paper.pdf", "bibtext":"@InProceedings{ Zha_MediCLIP_MICCAI2024,\n author = { Zhang, Ximiao and Xu, Min and Qiu, Dehui and Yan, Ruixin and Lang, Ning and Zhou, Xiuzhuang },\n title = { { MediCLIP: Adapting CLIP for Few-shot Medical Image Anomaly Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the field of medical decision-making, precise anomaly detection in medical imaging plays a pivotal role in aiding clinicians. However, previous work is reliant on large-scale datasets for training anomaly detection models, which increases the development cost. This paper first focuses on the task of medical image anomaly detection in the few-shot setting, which is critically significant for the medical field where data collection and annotation are both very expensive. We propose an innovative approach, MediCLIP, which adapts the CLIP model to few-shot medical image anomaly detection through self-supervised fine-tuning. Although CLIP, as a vision-language model, demonstrates outstanding zero-\/few-shot performance on various downstream tasks, it still falls short in the anomaly detection of medical images. To address this, we design a series of medical image anomaly synthesis tasks to simulate common disease patterns in medical imaging, transferring the powerful generalization capabilities of CLIP to the task of medical image anomaly detection. When only few-shot normal medical images are provided, MediCLIP achieves state-of-the-art performance in anomaly detection and location compared to other methods. Extensive experiments on three distinct medical anomaly detection tasks have demonstrated the superiority of our approach. 
The code is available at https:\/\/github.com\/cnulab\/MediCLIP.", "title":"MediCLIP: Adapting CLIP for Few-shot Medical Image Anomaly Detection", "authors":[ "Zhang, Ximiao", "Xu, Min", "Qiu, Dehui", "Yan, Ruixin", "Lang, Ning", "Zhou, Xiuzhuang" ], "id":"Conference", "arxiv_id":"2405.11315", "GitHub":[ "https:\/\/github.com\/cnulab\/MediCLIP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":400 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1007_paper.pdf", "bibtext":"@InProceedings{ Lin_Stable_MICCAI2024,\n author = { Lin, Tianyu and Chen, Zhiguang and Yan, Zhonghao and Yu, Weijiang and Zheng, Fudan },\n title = { { Stable Diffusion Segmentation for Biomedical Images with Single-step Reverse Process } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion models have demonstrated their effectiveness across various generative tasks. However, when applied to medical image segmentation, these models encounter several challenges, including significant resource and time requirements. They also necessitate a multi-step reverse process and multiple samples to produce reliable predictions. To address these challenges, we introduce the first latent diffusion segmentation model, named SDSeg, built upon stable diffusion (SD). SDSeg incorporates a straightforward latent estimation strategy to facilitate a single-step reverse process and utilizes latent fusion concatenation to remove the necessity for multiple samples. Extensive experiments indicate that SDSeg surpasses existing state-of-the-art methods on five benchmark datasets featuring diverse imaging modalities. 
Remarkably, SDSeg is capable of generating stable predictions with a solitary reverse step and sample, epitomizing the model\u2019s stability as implied by its name.\nThe code is available at https:\/\/github.com\/lin-tianyu\/Stable-Diffusion-Seg.", "title":"Stable Diffusion Segmentation for Biomedical Images with Single-step Reverse Process", "authors":[ "Lin, Tianyu", "Chen, Zhiguang", "Yan, Zhonghao", "Yu, Weijiang", "Zheng, Fudan" ], "id":"Conference", "arxiv_id":"2406.18361", "GitHub":[ "https:\/\/github.com\/lin-tianyu\/Stable-Diffusion-Seg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":401 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1081_paper.pdf", "bibtext":"@InProceedings{ Ace_The_MICCAI2024,\n author = { Acebes, Cesar and Moustafa, Abdel Hakim and Camara, Oscar and Galdran, Adrian },\n title = { { The Centerline-Cross Entropy Loss for Vessel-Like Structure Segmentation: Better Topology Consistency Without Sacrificing Accuracy } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Achieving accurate vessel segmentation in medical images is crucial for various clinical applications, but current methods often struggle to balance topological consistency (preserving vessel network structure) with segmentation accuracy (overlap with ground-truth).\nAlthough various strategies have been proposed to address this challenge, they typically necessitate significant modifications to network architecture, more annotations, or entail prohibitive computational costs, providing only partial topological improvements.\nThe clDice loss was recently proposed as an elegant and efficient alternative to preserve topology in tubular structure segmentation. However, segmentation accuracy is penalized and it lacks robustness to noisy annotations, mirroring the limitations of the conventional Dice loss. This work introduces the centerline-Cross Entropy (clCE) loss function, a novel approach which capitalizes on the robustness of Cross-Entropy loss and the topological focus of centerline-Dice loss, promoting optimal vessel overlap while maintaining faithful network structure. Extensive evaluations on diverse publicly available datasets (2D\/3D, retinal\/coronary) demonstrate clCE\u2019s effectiveness. Compared to existing losses, clCE achieves superior overlap with ground truth while simultaneously improving vascular connectivity. 
This paves the way for more accurate and clinically relevant vessel segmentation, particularly in complex 3D scenarios.\nWe share an implementation of the clCE loss function at https:\/\/github.com\/cesaracebes\/centerline_CE.", "title":"The Centerline-Cross Entropy Loss for Vessel-Like Structure Segmentation: Better Topology Consistency Without Sacrificing Accuracy", "authors":[ "Acebes, Cesar", "Moustafa, Abdel Hakim", "Camara, Oscar", "Galdran, Adrian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/cesaracebes\/centerline_CE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":402 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2377_paper.pdf", "bibtext":"@InProceedings{ Suk_LaBGATr_MICCAI2024,\n author = { Suk, Julian and Imre, Baris and Wolterink, Jelmer M. },\n title = { { LaB-GATr: geometric algebra transformers for large biomedical surface and volume meshes } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Many anatomical structures can be described by surface or volume meshes. Machine learning is a promising tool to extract information from these 3D models. However, high-fidelity meshes often contain hundreds of thousands of vertices, which creates unique challenges in building deep neural network architectures. Furthermore, patient-specific meshes may not be canonically aligned which limits the generalisation of machine learning algorithms. We propose LaB-GATr, a transformer neural network with geometric tokenisation that can effectively learn with large-scale (bio-)medical surface and volume meshes through sequence compression and interpolation. Our method extends the recently proposed geometric algebra transformer (GATr) and thus respects all Euclidean symmetries, i.e. rotation, translation and reflection, effectively mitigating the problem of canonical alignment between patients. LaB-GATr achieves state-of-the-art results on three tasks in cardiovascular hemodynamics modelling and neurodevelopmental phenotype prediction, featuring meshes of up to 200,000 vertices. Our results demonstrate that LaB-GATr is a powerful architecture for learning with high-fidelity meshes which has the potential to enable interesting downstream applications. Our implementation is publicly available.", "title":"LaB-GATr: geometric algebra transformers for large biomedical surface and volume meshes", "authors":[ "Suk, Julian", "Imre, Baris", "Wolterink, Jelmer M." 
], "id":"Conference", "arxiv_id":"2403.07536", "GitHub":[ "https:\/\/github.com\/sukjulian\/lab-gatr" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":403 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2579_paper.pdf", "bibtext":"@InProceedings{ Ma_Symmetry_MICCAI2024,\n author = { Ma, Yang and Wang, Dongang and Liu, Peilin and Masters, Lynette and Barnett, Michael and Cai, Weidong and Wang, Chenyu },\n title = { { Symmetry Awareness Encoded Deep Learning Framework for Brain Imaging Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"The heterogeneity of neurological conditions, ranging from structural anomalies to functional impairments, presents a significant challenge in medical imaging analysis tasks. Moreover, the limited availability of well-annotated datasets constrains the development of robust analysis models. Against this backdrop, this study introduces a novel approach leveraging the inherent anatomical symmetrical features of the human brain to enhance the subsequent detection and segmentation analysis for brain diseases. A novel Symmetry-Aware Cross-Attention (SACA) module is proposed to encode symmetrical features of left and right hemispheres, and a proxy task to detect symmetrical features as the Symmetry-Aware Head (SAH) is proposed, which guides the pretraining of the whole network on a vast 3D brain imaging dataset comprising both healthy and diseased brain images across various MRI and CT. Through meticulous experimentation on downstream tasks, including both classification and segmentation for brain diseases, our model demonstrates superior performance over state-of-the-art methodologies, particularly highlighting the significance of symmetry-aware learning. Our findings advocate for the effectiveness of incorporating symmetry awareness into pretraining and set a new benchmark for medical imaging analysis, promising significant strides toward accurate and efficient diagnostic processes. 
Code is available at https:\/\/github.com\/bitMyron\/sa-swin.", "title":"Symmetry Awareness Encoded Deep Learning Framework for Brain Imaging Analysis", "authors":[ "Ma, Yang", "Wang, Dongang", "Liu, Peilin", "Masters, Lynette", "Barnett, Michael", "Cai, Weidong", "Wang, Chenyu" ], "id":"Conference", "arxiv_id":"2407.08948", "GitHub":[ "https:\/\/github.com\/bitMyron\/sa-swin" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":404 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1155_paper.pdf", "bibtext":"@InProceedings{ Pan_SinoSynth_MICCAI2024,\n author = { Pang, Yunkui and Liu, Yilin and Chen, Xu and Yap, Pew-Thian and Lian, Jun },\n title = { { SinoSynth: A Physics-based Domain Randomization Approach for Generalizable CBCT Image Enhancement } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cone Beam Computed Tomography (CBCT) finds diverse applications in medicine. Ensuring high image quality in CBCT scans is essential for accurate diagnosis and treatment delivery. Yet, the susceptibility of CBCT images to noise and artifacts undermines both their usefulness and reliability. Existing methods typically address CBCT artifacts through image-to-image translation approaches. These methods, however, are limited by the artifact types present in the training data, which may not cover the complete spectrum of CBCT degradations stemming from variations in imaging protocols. Gathering additional data to encompass all possible scenarios can often pose a challenge. To address this, we present SinoSynth, a physics-based degradation model that simulates various CBCT-specific artifacts to generate a diverse set of synthetic CBCT images from high-quality CT images, without requiring pre-aligned data. Through extensive experiments, we demonstrate that several different generative networks trained on our synthesized data achieve remarkable results on heterogeneous multi-institutional datasets, outperforming even the same networks trained on actual data. 
We further show that our degradation model conveniently provides an avenue to enforce anatomical constraints in conditional generative models, yielding high-quality and structure-preserving synthetic CT images.", "title":"SinoSynth: A Physics-based Domain Randomization Approach for Generalizable CBCT Image Enhancement", "authors":[ "Pang, Yunkui", "Liu, Yilin", "Chen, Xu", "Yap, Pew-Thian", "Lian, Jun" ], "id":"Conference", "arxiv_id":"2409.18355", "GitHub":[ "https:\/\/github.com\/Pangyk\/SinoSynth" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":405 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2281_paper.pdf", "bibtext":"@InProceedings{ Lit_TADM_MICCAI2024,\n author = { Litrico, Mattia and Guarnera, Francesco and Giuffrida, Mario Valerio and Rav\u00ec, Daniele and Battiato, Sebastiano },\n title = { { TADM: Temporally-Aware Diffusion Model for Neurodegenerative Progression on Brain MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Generating realistic images to accurately predict changes in the structure of brain MRI can be a crucial tool for clinicians. Such applications can help assess patients\u2019 outcomes and analyze how diseases progress at the individual level. However, existing methods developed for this task present some limitations. Some approaches attempt to model the distribution of MRI scans directly by conditioning the model on patients\u2019 ages, but they fail to explicitly capture the relationship between structural changes in the brain and time intervals, especially on age-unbalanced datasets. Other approaches simply rely on interpolation between scans, which limits their clinical application as they do not predict future MRIs. To address these challenges, we propose a Temporally-Aware Diffusion Model (TADM), which introduces a novel approach to accurately infer progression in brain MRIs. TADM learns the distribution of structural changes in terms of intensity differences between scans and combines the prediction of these changes with the initial baseline scans to generate future MRIs. Furthermore, during training, we propose to leverage a pre-trained Brain-Age Estimator (BAE) to refine the model\u2019s training process, enhancing its ability to produce accurate MRIs that match the expected age gap between baseline and generated scans. Our assessment, conducted on 634 subjects from the OASIS-3 dataset, uses similarity metrics and region sizes computed by comparing predicted and real follow-up scans on 3 relevant brain regions. TADM achieves large improvements over existing approaches, with an average decrease of 24% in region size error and an improvement of 4% in similarity metrics. These evaluations demonstrate the improvement of our model in mimicking temporal brain neurodegenerative progression compared to existing methods. 
We believe that our approach will significantly benefit clinical applications, such as predicting patient outcomes or improving treatments for patients.", "title":"TADM: Temporally-Aware Diffusion Model for Neurodegenerative Progression on Brain MRI", "authors":[ "Litrico, Mattia", "Guarnera, Francesco", "Giuffrida, Mario Valerio", "Rav\u00ec, Daniele", "Battiato, Sebastiano" ], "id":"Conference", "arxiv_id":"2406.12411", "GitHub":[ "https:\/\/github.com\/MattiaLitrico\/TADM-Temporally-Aware-Diffusion-Model-for-Neurodegenerative-Progression-on-Brain-MRI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":406 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0376_paper.pdf", "bibtext":"@InProceedings{ Sar_VisionBased_MICCAI2024,\n author = { Sarwin, Gary and Carretta, Alessandro and Staartjes, Victor and Zoli, Matteo and Mazzatenta, Diego and Regli, Luca and Serra, Carlo and Konukoglu, Ender },\n title = { { Vision-Based Neurosurgical Guidance: Unsupervised Localization and Camera-Pose Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Localizing oneself during endoscopic procedures can be problematic due to the lack of distinguishable textures and landmarks, as well as difficulties due to the endoscopic device such as a limited field of view and challenging lighting conditions. Expert knowledge shaped by years of experience is required for localization within the human body during endoscopic procedures. In this work, we present a deep learning method based on anatomy recognition, that constructs a surgical path in an unsupervised manner from surgical videos, modelling relative location and variations due to different viewing angles. At inference time, the model can map unseen video frames on the path and estimate the viewing angle, aiming to provide guidance, for instance, to reach a particular destination. We test the method on a dataset consisting of surgical videos of pituitary surgery, i.e. transsphenoidal adenomectomy, as well as on a synthetic dataset. An online tool that lets researchers upload their surgical videos to obtain anatomy detections and the weights of the trained YOLOv7 model are available at: https:\/\/surgicalvision.bmic.ethz.ch.", "title":"Vision-Based Neurosurgical Guidance: Unsupervised Localization and Camera-Pose Prediction", "authors":[ "Sarwin, Gary", "Carretta, Alessandro", "Staartjes, Victor", "Zoli, Matteo", "Mazzatenta, Diego", "Regli, Luca", "Serra, Carlo", "Konukoglu, Ender" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":407 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3400_paper.pdf", "bibtext":"@InProceedings{ Chr_Confidence_MICCAI2024,\n author = { Christodoulou, Evangelia and Reinke, Annika and Houhou, Rola and Kalinowski, Piotr and Erkan, Selen and Sudre, Carole H. 
and Burgos, Ninon and Boutaj, Sofie\u0300ne and Loizillon, Sophie and Solal, Mae\u0308lys and Rieke, Nicola and Cheplygina, Veronika and Antonelli, Michela and Mayer, Leon D. and Tizabi, Minu D. and Cardoso, M. Jorge and Simpson, Amber and J\u00e4ger, Paul F. and Kopp-Schneider, Annette and Varoquaux, Gae\u0308l and Colliot, Olivier and Maier-Hein, Lena },\n title = { { Confidence intervals uncovered: Are we ready for real-world medical imaging AI? } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical imaging is spearheading the AI transformation of healthcare. Performance reporting is key to determine which methods should be translated into clinical practice. Frequently, broad conclusions are simply derived from mean performance values. In this paper, we argue that this common practice is often a misleading simplification as it ignores performance variability. Our contribution is threefold. (1) Analyzing all MICCAI segmentation papers (n = 221) published in 2023, we first observe that more than 50% of papers do not assess performance variability at all. Moreover, only one (0.5%) paper reported confidence intervals (CIs) for model performance. (2) To address the reporting bottleneck, we show that the unreported standard deviation (SD) in segmentation papers can be approximated by a second-order polynomial function of the mean Dice similarity coefficient (DSC). Based on external validation data from 56 previous MICCAI challenges, we demonstrate that this approximation can accurately reconstruct the CI of a method using information provided in publications. (3) Finally, we reconstructed 95% CIs around the mean DSC of MICCAI 2023 segmentation papers. The median CI width was 0.03 which is three times larger than the median performance gap between the first and second ranked method. For more than 60% of papers, the mean performance of the second-ranked method was within the CI of the first-ranked method. We conclude that current publications typically do not provide sufficient evidence to support which models could potentially be translated into clinical practice.", "title":"Confidence intervals uncovered: Are we ready for real-world medical imaging AI?", "authors":[ "Christodoulou, Evangelia", "Reinke, Annika", "Houhou, Rola", "Kalinowski, Piotr", "Erkan, Selen", "Sudre, Carole H.", "Burgos, Ninon", "Boutaj, Sofie\u0300ne", "Loizillon, Sophie", "Solal, Mae\u0308lys", "Rieke, Nicola", "Cheplygina, Veronika", "Antonelli, Michela", "Mayer, Leon D.", "Tizabi, Minu D.", "Cardoso, M. 
Jorge", "Simpson, Amber", "J\u00e4ger, Paul F.", "Kopp-Schneider, Annette", "Varoquaux, Gae\u0308l", "Colliot, Olivier", "Maier-Hein, Lena" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/IMSY-DKFZ\/CI_uncovered" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":408 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2078_paper.pdf", "bibtext":"@InProceedings{ Che_Pathological_MICCAI2024,\n author = { Chen, Fuqiang and Zhang, Ranran and Zheng, Boyun and Sun, Yiwen and He, Jiahui and Qin, Wenjian },\n title = { { Pathological Semantics-Preserving Learning for H&E-to-IHC Virtual Staining } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Conventional hematoxylin-eosin (H&E) staining is limited to revealing cell morphology and distribution, whereas immunohistochemical (IHC) staining provides precise and specific visualization of protein activation at the molecular level. Virtual staining technology has emerged as a solution for highly efficient IHC examination, which directly transforms H&E-stained images to IHC-stained images. However, virtual staining is challenged by the insufficient mining of pathological semantics and the spatial misalignment of pathological semantics. To address these issues, we propose the Pathological Semantics-Preserving Learning method for Virtual Staining (PSPStain), which directly incorporates the molecular-level semantic information and enhances semantics interaction despite any spatial inconsistency. Specifically, PSPStain comprises two novel learning strategies: 1) Protein-Aware Learning Strategy (PALS) with Focal Optical Density (FOD) map maintains the coherence of protein expression level, which represents molecular-level semantic information; 2) Prototype-Consistent Learning Strategy (PCLS), which enhances cross-image semantic interaction by prototypical consistency learning. We evaluate PSPStain on two public datasets using five metrics: three clinically relevant metrics and two for image quality. Extensive experiments indicate that PSPStain outperforms current state-of-the-art H&E-to-IHC virtual staining methods and demonstrates a high pathological correlation between the staging of real and virtual stains. 
Code is available at https:\/\/github.com\/ccitachi\/PSPStain.", "title":"Pathological Semantics-Preserving Learning for H E-to-IHC Virtual Staining", "authors":[ "Chen, Fuqiang", "Zhang, Ranran", "Zheng, Boyun", "Sun, Yiwen", "He, Jiahui", "Qin, Wenjian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ccitachi\/PSPStain" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":409 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1620_paper.pdf", "bibtext":"@InProceedings{ Lin_Revisiting_MICCAI2024,\n author = { Lin, Xian and Wang, Zhehao and Yan, Zengqiang and Yu, Li },\n title = { { Revisiting Self-Attention in Medical Transformers via Dependency Sparsification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Vision transformer (ViT), powered by token-to-token self-attention, has demonstrated superior performance across various vision tasks. The large and even global receptive field obtained via dense self-attention, allows it to build stronger representations than CNN. However, compared to natural images, both the amount and the signal-to-noise ratio of medical images are small, often resulting in poor convergence of vanilla self-attention and further introducing non-negligible noise from extensive unrelated tokens. Besides, token-to-token self-attention requires heavy memory and computation consumption, hindering its deployment onto various computing platforms. In this paper, we propose a dynamic self-attention sparsification method for medical transformers by merging similar feature tokens for dependency distillation under the guidance of feature prototypes. Specifically, we first generate feature prototypes with genetic relationships by simulating the process of cell division, where the number of prototypes is much smaller than that of feature tokens. Then, in each self-attention layer, key and value tokens are grouped based on their distance from feature prototypes. Tokens in the same group, together with the corresponding feature prototype, would be merged into a new prototype according to both feature importance and grouping confidence. Finally, query tokens build pair-wise dependency with such newly-updated prototypes for fewer but global and more efficient interactions. Extensive experiments on three publicly available datasets demonstrate the effectiveness of our solution, working as a plug-and-play module for joint complexity reduction and performance improvement of various medical transformers. 
Code is available at https:\/\/github.com\/xianlin7\/DMA.", "title":"Revisiting Self-Attention in Medical Transformers via Dependency Sparsification", "authors":[ "Lin, Xian", "Wang, Zhehao", "Yan, Zengqiang", "Yu, Li" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xianlin7\/DMA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":410 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1834_paper.pdf", "bibtext":"@InProceedings{ Yan_Generalized_MICCAI2024,\n author = { Yan, Zipei and Liang, Zhile and Liu, Zhengji and Wang, Shuai and Chun, Rachel Ka-Man and Li, Jizhou and Kee, Chea-su and Liang, Dong },\n title = { { Generalized Robust Fundus Photography-based Vision Loss Estimation for High Myopia } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"High myopia significantly increases the risk of irreversible vision loss. Traditional perimetry-based visual field (VF) assessment provides systematic quantification of visual loss but it is subjective and time-consuming. Consequently, machine learning models utilizing fundus photographs to estimate VF have emerged as promising alternatives. However, due to the high variability and the limited availability of VF data, existing VF estimation models fail to generalize well, particularly when facing out-of-distribution data across diverse centers and populations. To tackle this challenge, we propose a novel, parameter-efficient framework to enhance the generalized robustness of VF estimation on both in- and out-of-distribution data. Specifically, we design a Refinement-by-Denoising (RED) module for feature refinement and adaptation from pretrained vision models, aiming to learn high-entropy feature representations and to mitigate the domain gap effectively and efficiently. Through independent validation on two distinct real-world datasets from separate centers, our method significantly outperforms existing approaches in RMSE, MAE and correlation coefficient for both internal and external validation. 
Our proposed framework benefits both in- and out-of-distribution VF estimation, offering significant clinical implications and potential utility in real-world ophthalmic practices.", "title":"Generalized Robust Fundus Photography-based Vision Loss Estimation for High Myopia", "authors":[ "Yan, Zipei", "Liang, Zhile", "Liu, Zhengji", "Wang, Shuai", "Chun, Rachel Ka-Man", "Li, Jizhou", "Kee, Chea-su", "Liang, Dong" ], "id":"Conference", "arxiv_id":"2407.03699", "GitHub":[ "https:\/\/github.com\/yanzipei\/VF_RED" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":411 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1379_paper.pdf", "bibtext":"@InProceedings{ Gao_Loose_MICCAI2024,\n author = { Gao, Tianhong and Song, Jie and Yu, Xiaotian and Zhang, Shengxuming and Liang, Wenjie and Zhang, Hongbin and Li, Ziqian and Zhang, Wenzhuo and Zhang, Xiuming and Zhong, Zipeng and Song, Mingli and Feng, Zunlei },\n title = { { Loose Lesion Location Self-supervision Enhanced Colorectal Cancer Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Early diagnosis of colorectal cancer (CRC) is crucial for improving survival and quality of life. While computed tomography (CT) is a key diagnostic tool, manually screening colon tumors is time-consuming and repetitive for radiologists. Recently, deep learning has shown promise in medical image analysis, but its clinical application is limited by the model\u2019s unexplainability and the need for a large number of finely annotated samples. In this paper, we propose a loose lesion location self-supervision enhanced CRC diagnosis framework to reduce the requirement of fine sample annotations and improve the reliability of prediction results. For both non-contrast and contrast CT, despite potential deviations in imaging positions, the lesion location should be nearly consistent in images of both modalities at the same sequence position. In addition, lesion location in two successive slices is relatively close for the same modality. Therefore, a self-supervision mechanism is devised to enforce lesion location consistency at both temporal and modality levels of CT, reducing the need for fine annotations and enhancing the interpretability of diagnostics. Furthermore, this paper introduces a mask correction loopback strategy to reinforce the interdependence between category label and lesion location, ensuring the reliability of diagnosis. To verify our method\u2019s effectiveness, we collect data from 3,178 CRC patients and 887 healthy controls. Experiment results show that the proposed method not only provides reliable lesion localization but also enhances the classification performance by 1-2%, offering an effective diagnostic tool for CRC. 
Code is available at https:\/\/github.com\/Gaotianhong\/LooseLocationSS.", "title":"Loose Lesion Location Self-supervision Enhanced Colorectal Cancer Diagnosis", "authors":[ "Gao, Tianhong", "Song, Jie", "Yu, Xiaotian", "Zhang, Shengxuming", "Liang, Wenjie", "Zhang, Hongbin", "Li, Ziqian", "Zhang, Wenzhuo", "Zhang, Xiuming", "Zhong, Zipeng", "Song, Mingli", "Feng, Zunlei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Gaotianhong\/LooseLocationSS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":412 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0747_paper.pdf", "bibtext":"@InProceedings{ Shi_CS3_MICCAI2024,\n author = { Shi, Yi and Tian, Xu-Peng and Wang, Yun-Kai and Zhang, Tie-Yi and Yao, Bing and Wang, Hui and Shao, Yong and Wang, Cen-Cen and Zeng, Rong and Zhan, De-Chuan },\n title = { { CS3: Cascade SAM for Sperm Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated sperm morphology analysis plays a crucial role in the assessment of male fertility, yet its efficacy is often compromised by the challenges in accurately segmenting sperm images. Existing segmentation techniques, including the Segment Anything Model (SAM), are notably inadequate in addressing the complex issue of sperm overlap\u2014a frequent occurrence in clinical samples. Our exploratory studies reveal that modifying image characteristics by removing sperm heads and easily segmentable areas, alongside enhancing the visibility of overlapping regions, markedly enhances SAM\u2019s efficiency in segmenting intricate sperm structures. Motivated by these findings, we present the Cascade SAM for Sperm Segmentation (CS3), an unsupervised approach specifically designed to tackle the issue of sperm overlap. This method employs a cascade application of SAM to segment sperm heads, simple tails, and complex tails in stages. Subsequently, these segmented masks are meticulously matched and joined to construct complete sperm masks. In collaboration with leading medical institutions, we have compiled a dataset comprising approximately 2,000 unlabeled sperm images to fine-tune our method, and secured expert annotations for an additional 240 images to facilitate comprehensive model assessment. 
Experimental results demonstrate superior performance of CS3 compared to existing methods.", "title":"CS3: Cascade SAM for Sperm Segmentation", "authors":[ "Shi, Yi", "Tian, Xu-Peng", "Wang, Yun-Kai", "Zhang, Tie-Yi", "Yao, Bing", "Wang, Hui", "Shao, Yong", "Wang, Cen-Cen", "Zeng, Rong", "Zhan, De-Chuan" ], "id":"Conference", "arxiv_id":"2407.03772", "GitHub":[ "https:\/\/github.com\/shiy19\/CS3" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":413 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2109_paper.pdf", "bibtext":"@InProceedings{ Jou_HyperSpace_MICCAI2024,\n author = { Joutard, Samuel and Pietsch, Maximilian and Prevost, Raphael },\n title = { { HyperSpace: Hypernetworks for spacing-adaptive image segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical images are often acquired in different settings, requiring harmonization to adapt to the operating point of algorithms. Specifically, to standardize the physical spacing of imaging voxels in heterogeneous inference settings, images are typically resampled before being processed by deep learning models. However, down-sampling results in loss of information, whereas upsampling introduces redundant information leading to inefficient resource utilization. To overcome these issues, we propose to condition segmentation models on the voxel spacing using hypernetworks. Our approach allows processing images at their native resolutions or at resolutions adjusted to the hardware and time constraints at inference time. Our experiments across multiple datasets demonstrate that our approach achieves competitive performance compared to resolution-specific models, while offering greater flexibility for the end user. This also simplifies model development, deployment and maintenance. Our code will be made available at \\url{https:\/\/github.com\/anonymous}.", "title":"HyperSpace: Hypernetworks for spacing-adaptive image segmentation", "authors":[ "Joutard, Samuel", "Pietsch, Maximilian", "Prevost, Raphael" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ImFusionGmbH\/HyperSpace" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":414 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2092_paper.pdf", "bibtext":"@InProceedings{ Hu_SALI_MICCAI2024,\n author = { Hu, Qiang and Yi, Zhenyu and Zhou, Ying and Peng, Fang and Liu, Mei and Li, Qiang and Wang, Zhiwei },\n title = { { SALI: Short-term Alignment and Long-term Interaction Network for Colonoscopy Video Polyp Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Colonoscopy videos provide richer information in polyp segmentation for rectal cancer diagnosis. 
However, the endoscope\u2019s fast movement and close-up observation make current methods suffer from large spatial incoherence and continuous low-quality frames, and thus yield limited segmentation accuracy. In this context, we focus on robust video polyp segmentation by enhancing adjacent feature consistency and rebuilding reliable polyp representations. To achieve this goal, in this paper we propose the SALI network, a hybrid of a Short-term Alignment Module (SAM) and a Long-term Interaction Module (LIM). The SAM learns spatially aligned features of adjacent frames via deformable convolution and further harmonizes them to capture a more stable short-term polyp representation. In case of low-quality frames, the LIM stores the historical polyp representations as a long-term memory bank, and explores the retrospective relations to interactively rebuild more reliable polyp features for the current segmentation. Combining SAM and LIM, the SALI network for video segmentation shows great robustness to spatial variations and low-quality visual cues. Benchmarking on the large-scale SUN-SEG verifies the superiority of SALI over the current state of the art, improving Dice by 2.1%, 2.5%, 4.1% and 1.9% for the four test sub-sets, respectively. Codes are at https:\/\/github.com\/Scatteredrain\/SALI.", "title":"SALI: Short-term Alignment and Long-term Interaction Network for Colonoscopy Video Polyp Segmentation", "authors":[ "Hu, Qiang", "Yi, Zhenyu", "Zhou, Ying", "Peng, Fang", "Liu, Mei", "Li, Qiang", "Wang, Zhiwei" ], "id":"Conference", "arxiv_id":"2406.13532", "GitHub":[ "https:\/\/github.com\/Scatteredrain\/SALI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":415 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1533_paper.pdf", "bibtext":"@InProceedings{ Sto_Towards_MICCAI2024,\n author = { Stolte, Skylar E. and Indahlastari, Aprinda and Albizu, Alejandro and Woods, Adam J. and Fang, Ruogu },\n title = { { Towards tDCS Digital Twins using Deep Learning-based Direct Estimation of Personalized Electrical Field Maps from T1-Weighted MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Transcranial Direct Current Stimulation (tDCS) is a non-invasive brain stimulation method that applies neuromodulatory effects to the brain via low-intensity, direct current. It has shown possible positive effects in areas such as depression, substance use disorder, anxiety, and pain. Unfortunately, mixed trial results have delayed the field\u2019s progress. Electrical current field approximation provides a way for tDCS researchers to estimate how an individual will respond to specific tDCS parameters. Publicly available physics-based stimulators have led to much progress; however, they can be error-prone, susceptible to quality issues (e.g., poor segmentation), and take multiple hours to run. Digital functional twins provide a method of estimating brain function in response to stimuli using computational methods. We seek to implement this idea for individualized tDCS. 
Hence, this work provides a proof-of-concept for generating electrical field maps for tDCS directly from T1-weighted magnetic resonance images (MRIs). Our deep learning method employs special loss regularizations to improve the model\u2019s generalizability and\ncalibration across individual scans and electrode montages. Users may enter a desired electrode montage in addition to the unique MRI for a custom output. Our dataset includes 442 unique individual heads from individuals across the adult lifespan. The pipeline can generate results on the scale of minutes, unlike physics-based systems that can take 1-3 hours. Overall, our methods will help streamline the process of individual current dose estimations for improved tDCS interventions.", "title":"Towards tDCS Digital Twins using Deep Learning-based Direct Estimation of Personalized Electrical Field Maps from T1-Weighted MRI", "authors":[ "Stolte, Skylar E.", "Indahlastari, Aprinda", "Albizu, Alejandro", "Woods, Adam J.", "Fang, Ruogu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":416 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3730_paper.pdf", "bibtext":"@InProceedings{ P\u00e9r_MuST_MICCAI2024,\n author = { P\u00e9rez, Alejandra and Rodr\u00edguez, Santiago and Ayobi, Nicol\u00e1s and Aparicio, Nicol\u00e1s and Dessevres, Eug\u00e9nie and Arbel\u00e1ez, Pablo },\n title = { { MuST: Multi-Scale Transformers for Surgical Phase Recognition } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Phase recognition in surgical videos is crucial for enhancing computer-aided surgical systems as it enables automated understanding of sequential procedural stages. Existing methods often rely on fixed temporal windows for video analysis to identify dynamic surgical phases. Thus, they struggle to simultaneously capture short-, mid-, and long-term information necessary to fully understand complex surgical procedures. To address these issues, we propose Multi-Scale Transformers for Surgical Phase Recognition (MuST), a novel Transformer-based approach that combines a Multi-Term Frame encoder with a Temporal Consistency Module to capture information across multiple temporal scales of a surgical video. Our Multi-Term Frame Encoder computes interdependencies across a hierarchy of temporal scales by sampling sequences at increasing strides around the frame of interest. Furthermore, we employ a long-term Transformer encoder over the frame embeddings to further enhance long-term reasoning. 
MuST achieves higher performance than previous state-of-the-art methods on three different public benchmarks.", "title":"MuST: Multi-Scale Transformers for Surgical Phase Recognition", "authors":[ "P\u00e9rez, Alejandra", "Rodr\u00edguez, Santiago", "Ayobi, Nicol\u00e1s", "Aparicio, Nicol\u00e1s", "Dessevres, Eug\u00e9nie", "Arbel\u00e1ez, Pablo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/BCV-Uniandes\/MuST" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":417 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0850_paper.pdf", "bibtext":"@InProceedings{ Pal_Convex_MICCAI2024,\n author = { Pal, Jimut B. and Awate, Suyash P. },\n title = { { Convex Segments for Convex Objects using DNN Boundary Tracing and Graduated Optimization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Image segmentation often involves objects of interest that are biologically known to be convex shaped. While typical deep-neural-networks (DNNs) for object segmentation ignore object properties relating to shape, the DNNs that employ shape information fail to enforce hard constraints on shape. We design a brand-new DNN framework that guarantees convexity of the output object-segment by leveraging fundamental geometrical insights into the boundaries of convex-shaped objects. Moreover, we design our framework to build on typical existing DNNs for per-pixel segmentation, while maintaining simplicity in loss-term formulation and maintaining frugality in model size and training time. Results using six publicly available datasets demonstrates that our DNN framework, with little overheads, provides significant benefits in the robust segmentation of convex objects in out-of-distribution images.", "title":"Convex Segments for Convex Objects using DNN Boundary Tracing and Graduated Optimization", "authors":[ "Pal, Jimut B.", "Awate, Suyash P." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":418 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2215_paper.pdf", "bibtext":"@InProceedings{ Li_Universal_MICCAI2024,\n author = { Li, Liu and Wang, Hanchun and Baugh, Matthew and Ma, Qiang and Zhang, Weitong and Ouyang, Cheng and Rueckert, Daniel and Kainz, Bernhard },\n title = { { Universal Topology Refinement for Medical Image Segmentation with Polynomial Feature Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Although existing medical image segmentation methods provide impressive pixel-wise accuracy, they often neglect topological correctness, making their segmentations unusable for many downstream tasks. 
One option is to retrain such models whilst including a topology-driven loss component. However, this is computationally expensive and often impractical. A better solution would be to have a versatile plug-and-play topology refinement method that is compatible with any domain-specific segmentation pipeline. Directly training a post-processing model to mitigate topological errors often fails as such models tend to be biased towards the topological errors of a target segmentation network. The diversity of these errors is confined to the information provided by a labelled training set, which is especially problematic for small datasets. Our method solves this problem by training a model-agnostic topology refinement network with synthetic segmentations that cover a wide variety of topological errors. Inspired by the Stone-Weierstrass theorem, we synthesize topology-perturbation masks with randomly sampled coefficients of orthogonal polynomial bases, which ensures a complete and unbiased representation. Practically, we verify the efficiency and effectiveness of our method, which is compatible with multiple families of polynomial bases, and show evidence that our universal plug-and-play topology refinement network outperforms both existing topology-driven learning-based and post-processing methods. We also show that combining our method with learning-based models provides an effortless add-on, which can further improve the performance of existing approaches.", "title":"Universal Topology Refinement for Medical Image Segmentation with Polynomial Feature Synthesis", "authors":[ "Li, Liu", "Wang, Hanchun", "Baugh, Matthew", "Ma, Qiang", "Zhang, Weitong", "Ouyang, Cheng", "Rueckert, Daniel", "Kainz, Bernhard" ], "id":"Conference", "arxiv_id":"2409.09796", "GitHub":[ "https:\/\/github.com\/smilell\/Universal-Topology-Refinement" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":419 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0407_paper.pdf", "bibtext":"@InProceedings{ Zha_SHAN_MICCAI2024,\n author = { Zhang, Ruixuan and Lu, Wenhuan and Guan, Cuntai and Gao, Jie and Wei, Xi and Li, Xuewei },\n title = { { SHAN: Shape Guided Network for Thyroid Nodule Ultrasound Cross-Domain Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Segmentation models for thyroid ultrasound images are challenged by domain gaps across multi-center data. Some methods have been proposed to address this issue by enforcing consistency across multiple domains or by simulating domain gaps using augmented single-domain data. Among them, single-domain generalization methods offer a more universal solution, but their heavy reliance on data augmentation causes two issues for ultrasound image segmentation. Firstly, the corruption introduced by data augmentation may affect the distribution of grayscale values with diagnostic significance, leading to a decline in the model\u2019s segmentation ability. 
Secondly, the real domain gap between ultrasound images is difficult to simulate, so features still correlate with the domain, which in turn prevents the construction of a domain-independent latent space. To address these issues, given that the shape distribution of nodules is task-relevant but domain-independent, the SHape-prior Affine Network (SHAN) is proposed. SHAN uses the shape prior as a stable latent mapping space, learning the aspect ratio, size, and location of nodules through affine transformation of the prior. Thus, our method enhances the segmentation capability and cross-domain generalization of the model without any data augmentation. Additionally, SHAN is designed to be a plug-and-play method that can improve the performance of segmentation models with an encoder-decoder structure. Our experiments are performed on the public dataset TN3K and a private dataset TUI with 6 domains. By combining SHAN with several segmentation methods and comparing them with other single-domain generalization methods, we show that SHAN performs optimally on both source and target domain data.", "title":"SHAN: Shape Guided Network for Thyroid Nodule Ultrasound Cross-Domain Segmentation", "authors":[ "Zhang, Ruixuan", "Lu, Wenhuan", "Guan, Cuntai", "Gao, Jie", "Wei, Xi", "Li, Xuewei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":420 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2081_paper.pdf", "bibtext":"@InProceedings{ Bu_DnFPlane_MICCAI2024,\n author = { Bu, Ran and Xu, Chenwei and Shan, Jiwei and Li, Hao and Wang, Guangming and Miao, Yanzi and Wang, Hesheng },\n title = { { DnFPlane For Efficient and High-Quality 4D Reconstruction of Deformable Tissues } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Reconstruction of deformable tissues in robotic surgery from endoscopic stereo videos holds great significance for a variety of clinical applications. Existing methods primarily focus on enhancing inference speed, overlooking depth distortion issues in reconstruction results, particularly in regions occluded by surgical instruments. This may lead to misdiagnosis and surgical misguidance. In this paper, we propose an efficient algorithm designed to address the reconstruction challenges arising from depth distortion in complex scenarios. Unlike previous methods that treat each feature plane equally in the dynamic and static fields, our framework guides the static field with the dynamic field, generating a dynamic-mask to filter features at the time level. This allows the network to focus on more active dynamic features, reducing depth distortion. In addition, we design a module to address dynamic blurring. Using the dynamic-mask as guidance, we iteratively refine color values through Gated Recurrent Units (GRU), improving the clarity of tissue detail in the reconstructed results. Experiments on a public endoscope dataset demonstrate that our method outperforms existing state-of-the-art methods without compromising training time. 
Furthermore, our approach shows outstanding reconstruction performance in occluded regions, making it a more reliable solution in medical scenarios. Code is available: https:\/\/github.com\/CUMT-IRSI\/DnFPlane.git.", "title":"DnFPlane For Efficient and High-Quality 4D Reconstruction of Deformable Tissues", "authors":[ "Bu, Ran", "Xu, Chenwei", "Shan, Jiwei", "Li, Hao", "Wang, Guangming", "Miao, Yanzi", "Wang, Hesheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CUMT-IRSI\/DnFPlane.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":421 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1812_paper.pdf", "bibtext":"@InProceedings{ Du_RETCLIP_MICCAI2024,\n author = { Du, Jiawei and Guo, Jia and Zhang, Weihang and Yang, Shengzhu and Liu, Hanruo and Li, Huiqi and Wang, Ningli },\n title = { { RET-CLIP: A Retinal Image Foundation Model Pre-trained with Clinical Diagnostic Reports } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"The Vision-Language Foundation model is increasingly investigated in the fields of computer vision and natural language processing, yet its exploration in ophthalmology and broader medical applications remains limited. The challenge is the lack of labeled data for the training of foundation model. To handle this issue, a CLIP-style retinal image foundation model is developed in this paper. Our foundation model, RET-CLIP, is specifically trained on a dataset of 193,865 patients to extract general features of color fundus photographs (CFPs), employing a tripartite optimization strategy to focus on left eye, right eye, and patient level to reflect real-world clinical scenarios. Extensive experiments demonstrate that RET-CLIP outperforms existing benchmarks across eight diverse datasets spanning four critical diagnostic categories: diabetic retinopathy, glaucoma, multiple disease diagnosis, and multi-label classification of multiple diseases, which demonstrate the performance and generality of our foundation model. 
We will release our pre-trained model publicly in support of further research.", "title":"RET-CLIP: A Retinal Image Foundation Model Pre-trained with Clinical Diagnostic Reports", "authors":[ "Du, Jiawei", "Guo, Jia", "Zhang, Weihang", "Yang, Shengzhu", "Liu, Hanruo", "Li, Huiqi", "Wang, Ningli" ], "id":"Conference", "arxiv_id":"2405.14137", "GitHub":[ "https:\/\/github.com\/sStonemason\/RET-CLIP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":422 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2694_paper.pdf", "bibtext":"@InProceedings{ Nam_InstaSAM_MICCAI2024,\n author = { Nam, Siwoo and Namgung, Hyun and Jeong, Jaehoon and Luna, Miguel and Kim, Soopil and Chikontwe, Philip and Park, Sang Hyun },\n title = { { InstaSAM: Instance-aware Segment Any Nuclei Model with Point Annotations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Weakly supervised nuclei segmentation methods have been proposed to simplify the demanding labeling process by primarily depending on point annotations. These methods generate pseudo labels for training based on given points, but their accuracy is often limited by inaccurate pseudo labels. Even though there have been attempts to improve performance by utilizing the power of foundation models, e.g., the Segment Anything Model (SAM), these approaches require more precise guidance (e.g., boxes) and lack the ability to distinguish individual nuclei instances. To this end, we propose InstaSAM, a novel weakly supervised nuclei instance segmentation method that utilizes prediction confidence as a guide while leveraging the powerful representation of SAM. Specifically, we use point prompts to initially generate rough pseudo instance maps and fine-tune the adapter layers in the image encoder. To exclude unreliable instances, we selectively extract segmented cells with high confidence from the pseudo instance segmentation and utilize these for the training of binary segmentation and distance maps. Owing to their shared use of the image encoder, the binary map, distance map, and pseudo instance map benefit from complementary updates. Our experimental results demonstrate that our method significantly outperforms state-of-the-art methods and is robust in few-shot, shifted point, and cross-domain settings. 
The code will be available upon publication.", "title":"InstaSAM: Instance-aware Segment Any Nuclei Model with Point Annotations", "authors":[ "Nam, Siwoo", "Namgung, Hyun", "Jeong, Jaehoon", "Luna, Miguel", "Kim, Soopil", "Chikontwe, Philip", "Park, Sang Hyun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":423 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0141_paper.pdf", "bibtext":"@InProceedings{ Xia_Data_MICCAI2024,\n author = { Xiao, Anqi and Han, Keyi and Shi, Xiaojing and Tian, Jie and Hu, Zhenhua },\n title = { { Data Augmentation with Multi-armed Bandit on Image Deformations Improves Fluorescence Glioma Boundary Recognition } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recognizing glioma boundaries is challenging because gliomas are diffusely growing malignant tumors. Although fluorescence molecular imaging, especially in the second near-infrared window (NIR-II, 1000-1700 nm), helps improve surgical outcomes, fast and precise recognition remains in demand. Data-driven deep learning technology shows great promise in providing objective, fast, and precise recognition of glioma boundaries, but the lack of data poses challenges for designing effective models. Automatic data augmentation can improve the representation of small-scale datasets without requiring extensive prior information, which is suitable for fluorescence-based glioma boundary recognition. We propose Explore and Exploit Augment (EEA), based on a multi-armed bandit over image deformations, enabling dynamic policy adjustment during training. Additionally, images captured in white light and the first near-infrared window (NIR-I, 700-900 nm) are introduced to further enhance performance. Experiments demonstrate that EEA improves the generalization of four types of models for glioma boundary recognition, suggesting significant potential for aiding in medical image classification. 
Code is available at https:\/\/github.com\/ainieli\/EEA.", "title":"Data Augmentation with Multi-armed Bandit on Image Deformations Improves Fluorescence Glioma Boundary Recognition", "authors":[ "Xiao, Anqi", "Han, Keyi", "Shi, Xiaojing", "Tian, Jie", "Hu, Zhenhua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ainieli\/EEA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":424 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0026_paper.pdf", "bibtext":"@InProceedings{ Gui_TailEnhanced_MICCAI2024,\n author = { Gui, Shuangchun and Wang, Zhenkun },\n title = { { Tail-Enhanced Representation Learning for Surgical Triplet Recognition } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Surgical triplets recognition aims to identify instruments, verbs, and targets in a single video frame, while establishing associations among these components. Since this task has severe imbalanced class distribution, precisely identifying tail classes becomes a critical challenge. To cope with this issue, existing methods leverage knowledge distillation to facilitate tail triplet recognition. However, these methods overlook the low inter-triplet feature variance, diminishing the model\u2019s confidence in identifying classes. As a technique for learning discriminative features across instances, contrastive learning (CL) shows great potential in identifying triplets. Under this imbalanced class distribution, directly applying CL presents two problems: 1) multiple activities in one image make instance feature learning to interference from other classes, and 2) limited training samples of tail classes may lead to inadequate semantic capturing. In this paper, we propose a tail-enhanced representation learning (TERL) method to address these problems. TERL employs a disentangle module to acquire instance-level features in a single image. Obtaining these disentangled instances, those from tail classes are selected to conduct CL, which captures discriminative features by enabling a global memory bank. During CL, we further conduct semantic enhancement to each tail class. This generates component class prototypes based on the global bank, thus providing additional component information to tail classes. We evaluate the performance of TERL on the 5-fold cross-validation split of the CholecT45 dataset. 
The experimental results consistently demonstrate the superiority of TERL over state-of-the-art methods.", "title":"Tail-Enhanced Representation Learning for Surgical Triplet Recognition", "authors":[ "Gui, Shuangchun", "Wang, Zhenkun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CIAM-Group\/ComputerVision_Codes\/tree\/main\/TERL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":425 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1306_paper.pdf", "bibtext":"@InProceedings{ Jai_Follow_MICCAI2024,\n author = { Jain, Kshitiz and Rangarajan, Krithika and Arora, Chetan },\n title = { { Follow the Radiologist: Clinically Relevant Multi-View Cues for Breast Cancer Detection from Mammograms } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated breast cancer detection using deep learning based object detection models have achieved high sensitivity, but often struggles with high false positive rate. While radiologists possess the ability to analyze and identify malignant masses in mammograms using multiple views, it poses a challenge for deep learning based models. Inspired by how object appearance behave across multiple views in natural images, researchers have proposed several techniques to exploit geometric correspondence between location of a tumor in multiple views and reduce false positives. We question clinical relevance of such cues. We show that there is inherent ambiguity in geometric correspondence between the two mammography views, because of which accurate geometric alignment is not possible. Instead, we propose to match morphological cues between the two views. Harnessing recent advances for object detection approaches in computer vision, we adapt a state-of-the-art transformer architecture to use proposed morphological cues. We claim that proposed cues are more agreeable with a clinician\u2019s approach compared to the geometrical alignment. Using our approach, we show a significant improvement of 5% in sensitivity at 0.3 False Positives per Image (FPI) on benchmark INBreast dataset. We also report an improvement of 2% and 1% on in-house and benchmark DDSM dataset respectively. 
Realizing lack of open source code base in this area impeding reproducible research, we are publicly releasing source code and pretrained models for this work.", "title":"Follow the Radiologist: Clinically Relevant Multi-View Cues for Breast Cancer Detection from Mammograms", "authors":[ "Jain, Kshitiz", "Rangarajan, Krithika", "Arora, Chetan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":426 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0151_paper.pdf", "bibtext":"@InProceedings{ Shi_ShapeMambaEM_MICCAI2024,\n author = { Shi, Ruohua and Pang, Qiufan and Ma, Lei and Duan, Lingyu and Huang, Tiejun and Jiang, Tingting },\n title = { { ShapeMamba-EM: Fine-Tuning Foundation Model with Local Shape Descriptors and Mamba Blocks for 3D EM Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Electron microscopy (EM) imaging offers unparalleled resolution for analyzing neural tissues, crucial for uncovering the intricacies of synaptic connections and neural processes fundamental to understanding behavioral mechanisms. Recently, the foundation models have demonstrated impressive performance across numerous natural and medical image segmentation tasks. However, applying these foundation models to EM segmentation faces significant challenges due to domain disparities. This paper presents ShapeMamba-EM, a specialized fine-tuning method for 3D EM segmentation, which employs adapters for long-range dependency modeling and an encoder for local shape description within the original foundation model. This approach effectively addresses the unique volumetric and morphological complexities of EM data. 
Tested over a wide range of EM images, covering five segmentation tasks and 10 datasets, ShapeMamba-EM outperforms existing methods, establishing a new standard in EM image segmentation and enhancing the understanding of neural tissue architecture.", "title":"ShapeMamba-EM: Fine-Tuning Foundation Model with Local Shape Descriptors and Mamba Blocks for 3D EM Image Segmentation", "authors":[ "Shi, Ruohua", "Pang, Qiufan", "Ma, Lei", "Duan, Lingyu", "Huang, Tiejun", "Jiang, Tingting" ], "id":"Conference", "arxiv_id":"2408.14114", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":427 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3610_paper.pdf", "bibtext":"@InProceedings{ Wu_TLRN_MICCAI2024,\n author = { Wu, Nian and Xing, Jiarui and Zhang, Miaomiao },\n title = { { TLRN: Temporal Latent Residual Networks For Large Deformation Image Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"This paper presents a novel approach, termed Temporal Latent Residual Network (TLRN), to predict a sequence of deformation fields in time-series image registration. The challenge of registering time-series images often lies in the occurrence of large motions, especially when images differ significantly from a reference (e.g., the start of a cardiac cycle compared to the peak stretching phase). To achieve accurate and robust registration results, we leverage the nature of motion continuity and exploit the temporal smoothness in consecutive image frames. Our proposed TLRN highlights a temporal residual network with residual blocks carefully designed in latent deformation spaces, which are parameterized by time-sequential initial velocity fields. We treat a sequence of residual blocks over time as a dynamic training system, where each block is designed to learn the residual function between desired deformation features and the current input accumulated from previous time frames. We validate the effectiveness of TLRN on both synthetic data and real-world cine cardiac magnetic resonance (CMR) image videos. Our experimental results show that TLRN achieves substantially improved registration accuracy compared to the state-of-the-art. 
Our code is publicly available at https:\/\/github.com\/nellie689\/TLRN.", "title":"TLRN: Temporal Latent Residual Networks For Large Deformation Image Registration", "authors":[ "Wu, Nian", "Xing, Jiarui", "Zhang, Miaomiao" ], "id":"Conference", "arxiv_id":"2407.11219", "GitHub":[ "https:\/\/github.com\/nellie689\/TLRN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":428 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0930_paper.pdf", "bibtext":"@InProceedings{ Ouy_Promptbased_MICCAI2024,\n author = { Ouyang, Xi and Gu, Dongdong and Li, Xuejian and Zhou, Wenqi and Chen, Qianqian and Zhan, Yiqiang and Zhou, Xiang and Shi, Feng and Xue, Zhong and Shen, Dinggang },\n title = { { Prompt-based Segmentation Model of Anatomical Structures and Lesions in CT Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning models have been successfully developed for various medical image segmentation tasks. However, individual models are commonly developed using specific data along with a substantial amount of annotations, ignoring the internal connections between different tasks. To overcome this limitation, we integrate such a multi-task processing into a general computerized tomography (CT) image segmentation model trained on large-scale data, capable of performing a wide range of segmentation tasks. The rationale is that different segmentation tasks are often correlated, and their joint learning could potentially improve overall segmentation performance. Specifically, the proposed model is designed with a transformer-based encoder-decoder architecture coupled with automatic pathway (AP) modules. It provides a common image encoding and an automatic task-driven decoding pathway for performing different segmentation tasks via specific prompts. As a unified model capable of handling multiple tasks, our model not only improves the performance of seen tasks but also quickly adapts to new unseen tasks with a relatively small number of training samples while maintaining reasonable performance. 
Furthermore, the modular design of automatic pathway routing allows for parameter pruning for network size reduction during the deployment.", "title":"Prompt-based Segmentation Model of Anatomical Structures and Lesions in CT Images", "authors":[ "Ouyang, Xi", "Gu, Dongdong", "Li, Xuejian", "Zhou, Wenqi", "Chen, Qianqian", "Zhan, Yiqiang", "Zhou, Xiang", "Shi, Feng", "Xue, Zhong", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":429 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3963_paper.pdf", "bibtext":"@InProceedings{ Kha_Active_MICCAI2024,\n author = { Khanal, Bidur and Dai, Tianhong and Bhattarai, Binod and Linte, Cristian },\n title = { { Active Label Refinement for Robust Training of Imbalanced Medical Image Classification Tasks in the Presence of High Label Noise } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"The robustness of supervised deep learning-based medical image classification is significantly undermined by label noise in the training data. Although several methods have been proposed to enhance classification performance in the presence of noisy labels, they face some challenges: 1) a struggle with class-imbalanced datasets, leading to the frequent overlooking of minority classes as noisy samples; 2) a singular focus on maximizing performance using noisy datasets, without incorporating experts-in-the-loop for actively cleaning the noisy labels. To mitigate these challenges, we propose a two-phase approach that combines Learning with Noisy Labels (LNL) and active learning. This approach not only improves the robustness of medical image classification in the presence of noisy labels but also iteratively improves the quality of the dataset by relabeling the important incorrect labels, under a limited annotation budget. Furthermore, we introduce a novel Variance of Gradients approach in the LNL phase, which complements the loss-based sample selection by also sampling under-represented examples. Using two imbalanced noisy medical classification datasets, we demonstrate that our proposed technique is superior to its predecessors at handling class imbalance by not misidentifying clean samples from minority classes as mostly noisy samples. 
Code available at: https:\/\/github.com\/Bidur-Khanal\/imbalanced-medical-active-label-cleaning.git", "title":"Active Label Refinement for Robust Training of Imbalanced Medical Image Classification Tasks in the Presence of High Label Noise", "authors":[ "Khanal, Bidur", "Dai, Tianhong", "Bhattarai, Binod", "Linte, Cristian" ], "id":"Conference", "arxiv_id":"2407.05973", "GitHub":[ "https:\/\/github.com\/Bidur-Khanal\/imbalanced-medical-active-label-cleaning.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":430 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0339_paper.pdf", "bibtext":"@InProceedings{ Liu_CriDiff_MICCAI2024,\n author = { Liu, Tingwei and Zhang, Miao and Liu, Leiye and Zhong, Jialong and Wang, Shuyao and Piao, Yongri and Lu, Huchuan },\n title = { { CriDiff: Criss-cross Injection Diffusion Framework via Generative Pre-train for Prostate Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recently, the Diffusion Probabilistic Model (DPM)-based methods have achieved substantial success in the field of medical image segmentation. However, most of these methods fail to enable the diffusion model to learn edge features and non-edge features effectively and to inject them efficiently into the diffusion backbone. Additionally, the domain gap between the images features and the diffusion model features poses a great challenge to prostate segmentation. In this paper, we proposed CriDiff, a two-stage feature injecting framework with a Criss-cross Injection Strategy (CIS) and a Generative Pre-train (GP) approach for prostate segmentation. The CIS maximizes the use of multi-level features by efficiently harnessing the complementarity of high and low-level features. To effectively learn multi-level of edge features and non-edge features, we proposed two parallel conditioners in the CIS: the Boundary Enhance Conditioner (BEC) and the Core Enhance Conditioner (CEC), which discriminatively model the image edge regions and non-edge regions. Moreover, the GP approach eases the inconsistency between the images features and the diffusion model without adding additional parameters. 
Extensive experiments on four benchmark datasets demonstrate the effectiveness of the proposed method, which achieves state-of-the-art performance on four evaluation metrics.", "title":"CriDiff: Criss-cross Injection Diffusion Framework via Generative Pre-train for Prostate Segmentation", "authors":[ "Liu, Tingwei", "Zhang, Miao", "Liu, Leiye", "Zhong, Jialong", "Wang, Shuyao", "Piao, Yongri", "Lu, Huchuan" ], "id":"Conference", "arxiv_id":"2406.14186", "GitHub":[ "https:\/\/github.com\/LiuTingWed\/CriDiff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":431 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0271_paper.pdf", "bibtext":"@InProceedings{ Pan_ASA_MICCAI2024,\n author = { Pang, Jiaxuan and Ma, DongAo and Zhou, Ziyu and Gotway, Michael B. and Liang, Jianming },\n title = { { ASA: Learning Anatomical Consistency, Sub-volume Spatial Relationships and Fine-grained Appearance for CT Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"To achieve superior performance, deep learning relies on copious, high-quality, annotated data, but annotating medical images is tedious, laborious, and time-consuming, demanding specialized expertise, especially for segmentation tasks. Segmenting medical images requires not only macroscopic anatomical patterns but also microscopic textural details. Given the intriguing symmetry and recurrent patterns inherent in medical images, we envision a powerful deep model that exploits high-level context, spatial relationships in anatomy, and low-level, fine-grained, textural features in tissues in a self-supervised manner. To realize this vision, we have developed a novel self-supervised learning (SSL) approach called ASA to learn anatomical consistency, sub-volume spatial relationships, and fine-grained appearance for 3D computed tomography images. The novelty of ASA stems from its utilization of intrinsic properties of medical images, with a specific focus on computed tomography volumes. ASA enhances the model\u2019s capability to learn anatomical features from the image, encompassing global representation, local spatial relationships, and intricate appearance details. Extensive experimental results validate the robustness, effectiveness, and efficiency of the pretrained ASA model. 
With all code and pretrained models released at GitHub.com\/JLiangLab\/ASA, we hope ASA serves as an inspiration and a foundation for developing enhanced SSL models with a deep understanding of anatomical structures and their spatial relationships, thereby improving diagnostic accuracy and facilitating advanced medical imaging applications", "title":"ASA: Learning Anatomical Consistency, Sub-volume Spatial Relationships and Fine-grained Appearance for CT Images", "authors":[ "Pang, Jiaxuan", "Ma, DongAo", "Zhou, Ziyu", "Gotway, Michael B.", "Liang, Jianming" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":432 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1781_paper.pdf", "bibtext":"@InProceedings{ Luo_An_MICCAI2024,\n author = { Luo, Zihao and Luo, Xiangde and Gao, Zijun and Wang, Guotai },\n title = { { An Uncertainty-guided Tiered Self-training Framework for Active Source-free Domain Adaptation in Prostate Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning models have exhibited remarkable efficacy in accurately delineating the prostate for diagnosis and treatment of prostate diseases, but challenges persist in achieving robust generalization across different medical centers. Source-free Domain Adaptation (SFDA) is a promising technique to adapt deep segmentation models to address privacy and security concerns while reducing domain shifts between source and target domains. However, recent literature indicates that the performance of SFDA remains far from satisfactory due to unpredictable domain gaps. Annotating a few target domain samples is acceptable, as it can lead to significant performance improvement with a low annotation cost. Nevertheless, due to extremely limited annotation budgets, careful consideration is needed in selecting samples for annotation. Inspired by this, our goal is to develop Active Source-free Domain Adaptation (ASFDA) for medical image segmentation. Specifically, we propose a novel Uncertainty-guided Tiered Self-training (UGTST) framework, consisting of efficient active sample selection via entropy-based primary local peak filtering to aggregate global uncertainty and diversity-aware redundancy filter, coupled with a tiered self-learning strategy, achieves stable domain adaptation. Experimental results on cross-center prostate MRI segmentation datasets revealed that our method yielded marked advancements, with a mere 5% annotation, exhibiting an average Dice score enhancement of 9.78% and 7.58% in two target domains compared with state-of-the-art methods, on par with fully supervised learning. 
Code is available at: https:\/\/github.com\/HiLab-git\/UGTST.", "title":"An Uncertainty-guided Tiered Self-training Framework for Active Source-free Domain Adaptation in Prostate Segmentation", "authors":[ "Luo, Zihao", "Luo, Xiangde", "Gao, Zijun", "Wang, Guotai" ], "id":"Conference", "arxiv_id":"2407.02893", "GitHub":[ "https:\/\/github.com\/hilab-git\/ugtst" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":433 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1675_paper.pdf", "bibtext":"@InProceedings{ Zho_Weaklysupervised_MICCAI2024,\n author = { Zhong, Yuan and Tang, Chenhui and Yang, Yumeng and Qi, Ruoxi and Zhou, Kang and Gong, Yuqi and Heng, Pheng-Ann and Hsiao, Janet H. and Dou, Qi },\n title = { { Weakly-supervised Medical Image Segmentation with Gaze Annotations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Eye gaze that reveals human observational patterns has increasingly been incorporated into solutions for vision tasks. Despite recent explorations on leveraging gaze to aid deep networks, few studies\nexploit gaze as an efficient annotation approach for medical image segmentation which typically entails heavy annotating costs. In this paper, we propose to collect dense weak supervision for medical image segmentation with a gaze annotation scheme. To train with gaze, we propose a multi-level framework that trains multiple networks from discriminative human attention, simulated with a set of pseudo-masks derived by applying hierarchical thresholds on gaze heatmaps. Furthermore, to mitigate gaze noise, a cross-level consistency is exploited to regularize overfitting noisy labels, steering models toward clean patterns learned by peer networks. The proposed method is validated on two public medical datasets of polyp and prostate segmentation tasks. We contribute a high-quality gaze dataset entitled GazeMedSeg as an extension to the popular medical segmentation datasets. To the best of our knowledge, this is the first gaze dataset for medical image segmentation. Our experiments demonstrate that gaze annotation outperforms previous label-efficient annotation schemes in terms of both performance and annotation time. 
Our collected gaze data and code are available at: https:\/\/github.com\/med-air\/GazeMedSeg.", "title":"Weakly-supervised Medical Image Segmentation with Gaze Annotations", "authors":[ "Zhong, Yuan", "Tang, Chenhui", "Yang, Yumeng", "Qi, Ruoxi", "Zhou, Kang", "Gong, Yuqi", "Heng, Pheng-Ann", "Hsiao, Janet H.", "Dou, Qi" ], "id":"Conference", "arxiv_id":"2407.07406", "GitHub":[ "https:\/\/github.com\/med-air\/GazeMedSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":434 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1160_paper.pdf", "bibtext":"@InProceedings{ Che_LowRank_MICCAI2024,\n author = { Chen, Qian and Zhu, Lei and He, Hangzhou and Zhang, Xinliang and Zeng, Shuang and Ren, Qiushi and Lu, Yanye },\n title = { { Low-Rank Mixture-of-Experts for Continual Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"The primary goal of continual learning (CL) task in medical image segmentation field is to solve the \u201ccatastrophic forgetting\u201d problem, where the model totally forgets previously learned features when it is extended to new categories (class-level) or tasks (task-level). Due to the privacy protection, the historical data labels are inaccessible. Prevalent continual learning methods primarily focus on generating pseudo-labels for old datasets to force the model to memorize the learned features. However, the incorrect pseudo-labels may corrupt the learned feature and lead to a new problem that the better the model is trained on the old task, the poorer the model performs on the new tasks. To avoid this problem, we propose a network by introducing the data-specific Mixture of Experts (MoE) structure to handle the new tasks or categories, ensuring that the network parameters of previous tasks are unaffected or only minimally impacted. To further overcome the tremendous memory costs caused by introducing additional structures, we propose a Low-Rank strategy which significantly reduces memory cost. Fortunately, for task-level CL, we find that low-rank experts learned in previous tasks do not impair subsequent tasks but can assist. For class-level CL learning, we propose a gating function combined with language features, effectively enabling the model to handle multi-organ segmentation tasks in new and old classes. We validate our method on both class-level and task-level continual learning challenges. 
Extensive experiments on multiple datasets show our model outperforms all other methods.", "title":"Low-Rank Mixture-of-Experts for Continual Medical Image Segmentation", "authors":[ "Chen, Qian", "Zhu, Lei", "He, Hangzhou", "Zhang, Xinliang", "Zeng, Shuang", "Ren, Qiushi", "Lu, Yanye" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":435 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0500_paper.pdf", "bibtext":"@InProceedings{ Lu_H2ASeg_MICCAI2024,\n author = { Lu, Jinpeng and Chen, Jingyun and Cai, Linghan and Jiang, Songhan and Zhang, Yongbing },\n title = { { H2ASeg: Hierarchical Adaptive Interaction and Weighting Network for Tumor Segmentation in PET\/CT Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Positron emission tomography (PET) combined with computed tomography (CT) imaging is routinely used in cancer diagnosis and prognosis by providing complementary information. Automatically segmenting tumors in PET\/CT images can significantly improve examination efficiency. Traditional multi-modal segmentation solutions mainly rely on concatenation operations for modality fusion, which fail to effectively model the non-linear dependencies between PET and CT modalities. Recent studies have investigated various approaches to optimize the fusion of modality-specific features for enhancing joint representations. However, modality-specific encoders used in these methods operate independently, inadequately leveraging the synergistic relationships inherent in PET and CT modalities, for example, the complementarity between semantics and structure. To address these issues, we propose a Hierarchical Adaptive Interaction and Weighting Network termed H2ASeg to explore the intrinsic cross-modal correlations and transfer potential complementary information. Specifically, we design a Modality-Cooperative Spatial Attention (MCSA) module that performs intra- and inter-modal interactions globally and locally. Additionally, a Target-Aware Modality Weighting (TAMW) module is developed to highlight tumor-related features within multi-modal features, thereby refining tumor segmentation. By embedding these modules across different layers, H2ASeg can hierarchically model cross-modal correlations, enabling a nuanced understanding of both semantic and structural tumor features. Extensive experiments demonstrate the superiority of H2ASeg, outperforming state-of-the-art methods on AutoPet-II and Hecktor2022 benchmarks. 
The code is released at https:\/\/github.com\/JinPLu\/H2ASeg.", "title":"H2ASeg: Hierarchical Adaptive Interaction and Weighting Network for Tumor Segmentation in PET\/CT Images", "authors":[ "Lu, Jinpeng", "Chen, Jingyun", "Cai, Linghan", "Jiang, Songhan", "Zhang, Yongbing" ], "id":"Conference", "arxiv_id":"2403.18339", "GitHub":[ "https:\/\/github.com\/JinPLu\/H2ASeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":436 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0249_paper.pdf", "bibtext":"@InProceedings{ Zha_TARDRL_MICCAI2024,\n author = { Zhao, Yunxi and Nie, Dong and Chen, Geng and Wu, Xia and Zhang, Daoqiang and Wen, Xuyun },\n title = { { TARDRL: Task-Aware Reconstruction for Dynamic Representation Learning of fMRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Existing studies in fMRI analysis leverage mask autoencoder, a self-supervised framework, to build model to learn representations and conduct prediction for various fMRI-related tasks. It involves pretraining the model by reconstructing signals of brain regions that are randomly masked at different time segments and subsequently fine-tuning it for prediction tasks. Though it has shown improved performance in prediction tasks, we argue that directly applying this framework on fMRI data may result in sub-optimal results. Firstly, random masking is ineffective for highly redundant fMRI data. Secondly, the reconstruction process is not task-aware, ignoring a critical phenomenon: the varying contributions of different brain regions to different prediction tasks. In this work, we propose and demonstrate a hypothesis that learning representations by reconstructing signals from important ROIs at different time segments can enhance prediction performance. Specifically, we introduce a novel learning framework, Task-Aware Reconstruction Dynamic Representation Learning (TARDRL), to improve prediction performance through task-aware reconstruction. Our approach incorporates an attention-guided masking strategy, which leverages attention maps from the prediction process to guide signal masking during reconstruction, making the reconstruction task task-aware. Extensive experiments show that our model outperforms state-of-the-art methods on the ABIDE and ADNI datasets, with high interpretability.", "title":"TARDRL: Task-Aware Reconstruction for Dynamic Representation Learning of fMRI", "authors":[ "Zhao, Yunxi", "Nie, Dong", "Chen, Geng", "Wu, Xia", "Zhang, Daoqiang", "Wen, Xuyun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/WENXUYUN\/TARDRL\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":437 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0322_paper.pdf", "bibtext":"@InProceedings{ Guz_Differentiable_MICCAI2024,\n author = { Guzzi, Lisa and Zuluaga, Maria A. 
and Lareyre, Fabien and Di Lorenzo, Gilles and Goffart, S\u00e9bastien and Chierici, Andrea and Raffort, Juliette and Delingette, Herv\u00e9 },\n title = { { Differentiable Soft Morphological Filters for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Morphological operations such as erosion, dilation, and skeletonization offer valuable tools for processing and analyzing segmentation masks. Several studies have investigated the integration of differentiable morphological operations within deep segmentation neural networks, particularly for the computation of loss functions. However, those methods have shown limitations in terms of reliability, versatility or applicability to different types of operations and image dimensions. In this paper, we present a novel framework that provides differentiable morphological filters on probabilistic maps. Given any morphological filter defined on 2D or 3D binary images, our approach generates a soft version of this filter by translating Boolean expressions into multilinear polynomials. Moreover, using proxy polynomials, these soft filters have the same computational complexity as the original binary filter. We demonstrate on diverse biomedical datasets that our method can be easily integrated into neural networks either as a loss function or as the final morphological layer in a segmentation network. In particular, we show that the proposed filters for mask erosion, dilation or skeletonization lead to competitive solutions compared to the state-of-the-art.", "title":"Differentiable Soft Morphological Filters for Medical Image Segmentation", "authors":[ "Guzzi, Lisa", "Zuluaga, Maria A.", "Lareyre, Fabien", "Di Lorenzo, Gilles", "Goffart, S\u00e9bastien", "Chierici, Andrea", "Raffort, Juliette", "Delingette, Herv\u00e9" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/lisaGUZZI\/Soft-morph" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":438 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0221_paper.pdf", "bibtext":"@InProceedings{ Bai_EndoUIC_MICCAI2024,\n author = { Bai, Long and Chen, Tong and Tan, Qiaozhi and Nah, Wan Jun and Li, Yanheng and He, Zhicheng and Yuan, Sishen and Chen, Zhen and Wu, Jinlin and Islam, Mobarakol and Li, Zhen and Liu, Hongbin and Ren, Hongliang },\n title = { { EndoUIC: Promptable Diffusion Transformer for Unified Illumination Correction in Capsule Endoscopy } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Wireless Capsule Endoscopy (WCE) is highly valued for its non-invasive and painless approach, though its effectiveness is compromised by uneven illumination from hardware constraints and complex internal dynamics, leading to overexposed or underexposed images. While researchers have discussed the challenges of low-light enhancement in WCE, the issue of correcting for different exposure levels remains underexplored. 
To tackle this, we introduce EndoUIC, a WCE unified illumination correction solution using an end-to-end promptable diffusion transformer (DiT) model. In our work, the illumination prompt module shall navigate the model to adapt to different exposure levels and perform targeted image enhancement, in which the Adaptive Prompt Integration (API) and Global Prompt Scanner (GPS) modules shall further boost the concurrent representation learning between the prompt parameters and features. Besides, the U-shaped restoration DiT model shall capture the long-range dependencies and contextual information for unified illumination restoration. Moreover, we present a novel Capsule-endoscopy Exposure Correction (CEC) dataset, including ground-truth and corrupted image pairs annotated by expert photographers. Extensive experiments against a variety of state-of-the-art (SOTA) methods on four datasets showcase the effectiveness of our proposed method and components in WCE illumination restoration, and the additional downstream experiments further demonstrate its utility for clinical diagnosis and surgical assistance. The code and the proposed dataset are available at github.com\/longbai1006\/EndoUIC.", "title":"EndoUIC: Promptable Diffusion Transformer for Unified Illumination Correction in Capsule Endoscopy", "authors":[ "Bai, Long", "Chen, Tong", "Tan, Qiaozhi", "Nah, Wan Jun", "Li, Yanheng", "He, Zhicheng", "Yuan, Sishen", "Chen, Zhen", "Wu, Jinlin", "Islam, Mobarakol", "Li, Zhen", "Liu, Hongbin", "Ren, Hongliang" ], "id":"Conference", "arxiv_id":"2406.13705", "GitHub":[ "https:\/\/github.com\/longbai1006\/EndoUIC" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":439 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0915_paper.pdf", "bibtext":"@InProceedings{ Li_Improved_MICCAI2024,\n author = { Li, Chunli and Zhang, Xiaoming and Gao, Yuan and Yin, Xiaoli and Lu, Le and Zhang, Ling and Yan, Ke and Shi, Yu },\n title = { { Improved Esophageal Varices Assessment from Non-Contrast CT Scans } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Esophageal varices (EV), a serious health concern resulting from portal hypertension, are traditionally diagnosed through invasive endoscopic procedures. Despite non-contrast computed tomography (NC-CT) imaging being a less expensive and non-invasive imaging modality, it has yet to gain full acceptance as a primary clinical diagnostic tool for EV evaluation. To overcome existing diagnostic challenges, we present the Multi-Organ-cOhesion-Network (MOON), a novel framework enhancing the analysis of critical organ features in NC-CT scans for effective assessment of EV. Drawing inspiration from the thorough assessment practices of radiologists, MOON establishes a cohesive multi-organ analysis model that unifies the imaging features of the related organs of EV, namely esophagus, liver, and spleen. This integration significantly increases the diagnostic accuracy for EV. We have compiled an extensive NC-CT dataset of 1,255 patients diagnosed with EV, spanning three grades of severity. Each case is corroborated by endoscopic diagnostic results. 
The efficacy of MOON has been substantiated through a validation process involving multi-fold cross-validation on 1,010 cases and an independent test on 245 cases, exhibiting superior diagnostic performance compared to methods focusing solely on the esophagus (for classifying severe grade: AUC of 0.864 versus 0.803, and for moderate to severe grades: AUC of 0.832 versus 0.793). To our knowledge, MOON is the first work to incorporate a synchronized multi-organ NC-CT analysis for EV assessment, providing a more acceptable and minimally invasive alternative for patients compared to traditional endoscopy.", "title":"Improved Esophageal Varices Assessment from Non-Contrast CT Scans", "authors":[ "Li, Chunli", "Zhang, Xiaoming", "Gao, Yuan", "Yin, Xiaoli", "Lu, Le", "Zhang, Ling", "Yan, Ke", "Shi, Yu" ], "id":"Conference", "arxiv_id":"2407.13210", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":440 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3839_paper.pdf", "bibtext":"@InProceedings{ De_Interpretable_MICCAI2024,\n author = { De Vries, Matt and Naidoo, Reed and Fourkioti, Olga and Dent, Lucas G. and Curry, Nathan and Dunsby, Christopher and Bakal, Chris },\n title = { { Interpretable phenotypic profiling of 3D cellular morphodynamics } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"The dynamic 3D shape of a cell acts as a signal of its physiological state, reflecting the interplay of environmental stimuli and intra- and extra-cellular processes. However, there is little quantitative understanding of cell shape determination in 3D, largely due to the lack of data-driven methods that analyse 3D cell shape dynamics. To address this, we have developed MorphoSense, an interpretable, variable-length multivariate time series classification (TSC) pipeline based on multiple instance learning (MIL). We use this pipeline to classify 3D cell shape dynamics of perturbed cancer cells and learn hallmark 3D shape changes associated with clinically relevant and shape-modulating small molecule treatments. To show the generalisability across datasets, we apply our pipeline to classify migrating T-cells in collagen matrices and assess interpretability on a synthetic dataset. Across datasets, our pipeline offers increased predictive performance and higher-quality interpretations. 
To our knowledge, our work is the first to utilise MIL for multivariate, variable-length TSC, focusing on interpretable 3D morphodynamic profiling of biological cells.", "title":"Interpretable phenotypic profiling of 3D cellular morphodynamics", "authors":[ "De Vries, Matt", "Naidoo, Reed", "Fourkioti, Olga", "Dent, Lucas G.", "Curry, Nathan", "Dunsby, Christopher", "Bakal, Chris" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":441 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1687_paper.pdf", "bibtext":"@InProceedings{ Hua_MetaAD_MICCAI2024,\n author = { Huang, Haolin and Shen, Zhenrong and Wang, Jing and Wang, Xinyu and Lu, Jiaying and Lin, Huamei and Ge, Jingjie and Zuo, Chuantao and Wang, Qian },\n title = { { MetaAD: Metabolism-Aware Anomaly Detection for Parkinson\u2019s Disease in 3D 18F-FDG PET } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The dopamine transporter (DAT) imaging such as 11C-CFT PET has shown significant superiority in diagnosing Parkinson\u2019s Disease (PD).\nHowever, most hospitals have no access to DAT imaging but instead turn to the commonly used 18F-FDG PET, which may not show major abnormalities of PD at visual analysis and thus hinder the performance of computer-aided diagnosis (CAD).\nTo tackle this challenge, we propose a Metabolism-aware Anomaly Detection (MetaAD) framework to highlight abnormal metabolism cues of PD in 18F-FDG PET scans.\nMetaAD converts the input FDG image into a synthetic CFT image with healthy patterns, and then reconstructs the FDG image by a reversed modality mapping.\nThe visual differences between the input and reconstructed images serve as indicators of PD metabolic anomalies.\nA dual-path training scheme is adopted to prompt the generators to learn an explicit normal data distribution via cyclic modality translation while enhancing their abilities to memorize healthy metabolic characteristics.\nThe experiments reveal that MetaAD not only achieves superior performance in visual interpretability and anomaly detection for PD diagnosis, but also shows effectiveness in assisting supervised CAD methods.", "title":"MetaAD: Metabolism-Aware Anomaly Detection for Parkinson\u2019s Disease in 3D 18F-FDG PET", "authors":[ "Huang, Haolin", "Shen, Zhenrong", "Wang, Jing", "Wang, Xinyu", "Lu, Jiaying", "Lin, Huamei", "Ge, Jingjie", "Zuo, Chuantao", "Wang, Qian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MedAIerHHL\/MetaAD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":442 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3997_paper.pdf", "bibtext":"@InProceedings{ Pac_Vertex_MICCAI2024,\n author = { Pacheco, Carolina and Yellin, Florence and Vidal, Rene\u0301 and Haeffele, Benjamin },\n title = { { Vertex Proportion Loss for Multi-Class Cell Detection from Label Proportions } },\n 
booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Learning from label proportions (LLP) is a weakly supervised classification task in which training instances are grouped into bags annotated only with class proportions. While this task emerges naturally in many applications, its performance is often evaluated in bags generated artificially by sampling uniformly from balanced, annotated datasets. In contrast, we study the LLP task in multi-class blood cell detection, where each image can be seen as a \u201cbag\u201d\u2019 of cells and class proportions can be obtained using a hematocytometer. This application introduces several challenges that are not appropriately captured by the usual LLP evaluation regime, including variable bag size, noisy proportion annotations, and inherent class imbalance. In this paper, we propose the Vertex Proportion loss, a new, principled loss for LLP, which uses optimal transport to infer instance labels from label proportions, and a Deep Sparse Detector that leverages the sparsity of the images to localize and learn a useful representation of the cells in a self-supervised way. We demonstrate the advantages of the proposed method over existing approaches when evaluated in real and synthetic white blood cell datasets.", "title":"Vertex Proportion Loss for Multi-Class Cell Detection from Label Proportions", "authors":[ "Pacheco, Carolina", "Yellin, Florence", "Vidal, Rene\u0301", "Haeffele, Benjamin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/carolina-pacheco\/LLP_multiclass_cell_detection\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":443 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2889_paper.pdf", "bibtext":"@InProceedings{ Zho_Enhancing_MICCAI2024,\n author = { Zhou, Tianfeng and Zhou, Yukun },\n title = { { Enhancing Model Generalisability through Sampling Diverse and Balanced Retinal Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Model generalisability, i.e. performance on multiple unseen datasets, can be improved by training on large volumes of annotated data, from which models can learn diverse representations. However, annotated medical data is limited due to the scarcity of expertise. In this work, we present an efficient data sampling pipeline to select DIVerse and bAlanced images (DataDIVA) from image pools to maximise model generalisability in retinal imaging. Specifically, we first extract image feature embeddings using the foundation model off-the-shelf and generate embedding clusters. We then evenly sample images from those diverse clusters and train a model. We run the trained model on the whole unlabelled image pool and sample the remaining images from those classified as rare categories. This pipeline aims to sample the retinal images with diverse representations and mitigate the unbalanced distribution. 
We show that DataDIVA consistently improved the model performance in both internal and external evaluation, on six public datasets, with clinically meaningful tasks of referable diabetic retinopathy and glaucoma detection. The code is available at https:\/\/doi.org\/10.5281\/zenodo.12674694.", "title":"Enhancing Model Generalisability through Sampling Diverse and Balanced Retinal Images", "authors":[ "Zhou, Tianfeng", "Zhou, Yukun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":444 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0869_paper.pdf", "bibtext":"@InProceedings{ Wan_Selfguided_MICCAI2024,\n author = { Wang, Zhepeng and Bao, Runxue and Wu, Yawen and Liu, Guodong and Yang, Lei and Zhan, Liang and Zheng, Feng and Jiang, Weiwen and Zhang, Yanfu },\n title = { { Self-guided Knowledge-injected Graph Neural Network for Alzheimer\u2019s Diseases } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Graph neural networks (GNNs) are proficient machine learning models in handling irregularly structured data. Nevertheless, their generic formulation falls short when applied to the analysis of brain connectomes in Alzheimer\u2019s Disease (AD), necessitating the incorporation of domain-specific knowledge to achieve optimal model performance. The integration of AD-related expertise into GNNs presents a significant challenge. Current methodologies reliant on manual design often demand substantial expertise from external domain specialists to guide the development of novel models, thereby consuming considerable time and resources. To mitigate the need for manual curation, this paper introduces a novel self-guided knowledge-infused multimodal GNN to autonomously integrate domain knowledge into the model development process. We propose to conceptualize existing domain knowledge as natural language, and devise a specialized multimodal GNN framework tailored to leverage this uncurated knowledge to direct the learning of the GNN submodule, thereby enhancing its efficacy and improving prediction interpretability. To assess the effectiveness of our framework, we compile a comprehensive literature dataset comprising recent peer-reviewed publications on AD. By integrating this literature dataset with several real-world AD datasets, our experimental results illustrate the effectiveness of the proposed method in extracting curated knowledge and offering explanations on graphs for domain-specific applications. 
Furthermore, our approach successfully utilizes the extracted information to enhance the performance of the GNN.", "title":"Self-guided Knowledge-injected Graph Neural Network for Alzheimer\u2019s Diseases", "authors":[ "Wang, Zhepeng", "Bao, Runxue", "Wu, Yawen", "Liu, Guodong", "Yang, Lei", "Zhan, Liang", "Zheng, Feng", "Jiang, Weiwen", "Zhang, Yanfu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":445 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3077_paper.pdf", "bibtext":"@InProceedings{ Xie_SurgicalGaussian_MICCAI2024,\n author = { Xie, Weixing and Yao, Junfeng and Cao, Xianpeng and Lin, Qiqin and Tang, Zerui and Dong, Xiao and Guo, Xiaohu },\n title = { { SurgicalGaussian: Deformable 3D Gaussians for High-Fidelity Surgical Scene Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Dynamic reconstruction of deformable tissues in endoscopic video is a key technology for robot-assisted surgery. Recent reconstruction methods based on neural radiance fields (NeRFs) have achieved remarkable results in the reconstruction of surgical scenes. However, based on implicit representation, NeRFs struggle to capture the intricate details of objects in the scene and cannot achieve real-time rendering. In addition, restricted single view perception and occluded instruments also pose special challenges in surgical scene reconstruction. To address these issues, we develop SurgicalGaussian, a deformable 3D Gaussian Splatting method to model dynamic surgical scenes. Our approach models the spatio-temporal features of soft tissues at each time stamp via a forward-mapping deformation MLP and regularization to constrain local 3D Gaussians to comply with consistent movement. With the depth initialization strategy and tool mask-guided training, our method can remove surgical instruments and reconstruct high-fidelity surgical scenes. Through experiments on various surgical videos, our network outperforms existing methods on many aspects, including rendering quality, rendering speed and GPU usage. 
The project page can be found at https:\/\/surgicalgaussian.github.io.", "title":"SurgicalGaussian: Deformable 3D Gaussians for High-Fidelity Surgical Scene Reconstruction", "authors":[ "Xie, Weixing", "Yao, Junfeng", "Cao, Xianpeng", "Lin, Qiqin", "Tang, Zerui", "Dong, Xiao", "Guo, Xiaohu" ], "id":"Conference", "arxiv_id":"2407.05023", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2407.05023", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":446 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2384_paper.pdf", "bibtext":"@InProceedings{ Che_WiNet_MICCAI2024,\n author = { Cheng, Xinxing and Jia, Xi and Lu, Wenqi and Li, Qiufu and Shen, Linlin and Krull, Alexander and Duan, Jinming },\n title = { { WiNet: Wavelet-based Incremental Learning for Efficient Medical Image Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep image registration has demonstrated exceptional accuracy and fast inference. Recent advances have adopted either multiple cascades or pyramid architectures to estimate dense deformation fields in a coarse-to-fine manner. However, due to the cascaded nature and repeated composition\/warping operations on feature maps, these methods negatively increase memory usage during training and testing. Moreover, such approaches lack explicit constraints on the learning process of small deformations at different scales, thus lacking explainability. In this study, we introduce a model-driven WiNet that incrementally estimates scale-wise wavelet coefficients for the displacement\/velocity field across various scales, utilizing the wavelet coefficients derived from the original input image pair. By exploiting the properties of the wavelet transform, these estimated coefficients facilitate the seamless reconstruction of a full-resolution displacement\/velocity field via our devised inverse discrete wavelet transform (IDWT) layer. This approach avoids the complexities of cascading networks or composition operations, making our WiNet an explainable and efficient competitor with other coarse-to-fine methods. Extensive experimental results from two 3D datasets show that our WiNet is accurate and GPU efficient. 
Code is available at \\url{https:\/\/github.com\/x-xc\/WiNet}.", "title":"WiNet: Wavelet-based Incremental Learning for Efficient Medical Image Registration", "authors":[ "Cheng, Xinxing", "Jia, Xi", "Lu, Wenqi", "Li, Qiufu", "Shen, Linlin", "Krull, Alexander", "Duan, Jinming" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/x-xc\/WiNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":447 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0737_paper.pdf", "bibtext":"@InProceedings{ Wu_Cephalometric_MICCAI2024,\n author = { Wu, Han and Wang, Chong and Mei, Lanzhuju and Yang, Tong and Zhu, Min and Shen, Dinggang and Cui, Zhiming },\n title = { { Cephalometric Landmark Detection across Ages with Prototypical Network } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated cephalometric landmark detection is crucial in real-world orthodontic diagnosis.\nCurrent studies mainly focus on only adult subjects, neglecting the clinically crucial scenario presented by adolescents whose landmarks often exhibit significantly different appearances compared to adults.\nHence, an open question arises about how to develop a unified and effective detection algorithm across various age groups, including adolescents and adults. \nIn this paper, we propose CeLDA, the first work for \\textbf{Ce}phalometric \\textbf{L}andmark \\textbf{D}etection across \\textbf{A}ges.\nOur method leverages a prototypical network for landmark detection by comparing image features with landmark prototypes. \nTo tackle the appearance discrepancy of landmarks between age groups, we design new strategies for CeLDA to improve prototype alignment and obtain a holistic estimation of landmark prototypes from a large set of training images.\nMoreover, a novel prototype relation mining paradigm is introduced to exploit the anatomical relations between the landmark prototypes. \nExtensive experiments validate the superiority of CeLDA in detecting cephalometric landmarks on both adult and adolescent subjects. \nTo our knowledge, this is the first effort toward developing a unified solution and dataset for cephalometric landmark detection across age groups. 
\nOur code and dataset will be made public on https:\/\/github.com\/ShanghaiTech-IMPACT\/CeLDA.", "title":"Cephalometric Landmark Detection across Ages with Prototypical Network", "authors":[ "Wu, Han", "Wang, Chong", "Mei, Lanzhuju", "Yang, Tong", "Zhu, Min", "Shen, Dinggang", "Cui, Zhiming" ], "id":"Conference", "arxiv_id":"2406.12577", "GitHub":[ "https:\/\/github.com\/ShanghaiTech-IMPACT\/CeLDA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":448 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2617_paper.pdf", "bibtext":"@InProceedings{ Fan_Simultaneous_MICCAI2024,\n author = { Fan, Wenkang and Jiang, Wenjing and Fang, Hao and Shi, Hong and Chen, Jianhua and Luo, Xiongbiao },\n title = { { Simultaneous Monocular Endoscopic Dense Depth and Odometry Estimation Using Local-Global Integration Networks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate dense depth prediction of monocular endoscopic images is essential in expanding the surgical field and augmenting the perception of depth for surgeons. However, it remains challenging since endoscopic videos generally suffer from limited field of view, illumination variations, and weak texture. This work proposes LGIN, a new architecture with unsupervised learning for accurate dense depth recovery of monocular endoscopic images. Specifically, LGIN creates a hybrid encoder using dense convolution and pyramid vision transformer to extract local textural features and global spatial-temporal features in parallel, while building a decoder to effectively integrate the local and global features and use two-heads to estimate dense depth and odometry simultaneously, respectively. Additionally, we extract structure-valid regions to assist odometry prediction and unsupervised training to improve the accuracy of depth prediction. We evaluated our model on both clinical and synthetic unannotated colonoscopic video images, with the experimental results demonstrating that our model can achieve more accurate depth distribution and more sufficient textures. 
Both the qualitative and quantitative assessment results of our method are better than current monocular dense depth estimation models.", "title":"Simultaneous Monocular Endoscopic Dense Depth and Odometry Estimation Using Local-Global Integration Networks", "authors":[ "Fan, Wenkang", "Jiang, Wenjing", "Fang, Hao", "Shi, Hong", "Chen, Jianhua", "Luo, Xiongbiao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":449 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0102_paper.pdf", "bibtext":"@InProceedings{ Wu_Few_MICCAI2024,\n author = { Wu, Xinyao and Xu, Zhe and Tong, Raymond Kai-yu },\n title = { { Few Slices Suffice: Multi-Faceted Consistency Learning with Active Cross-Annotation for Barely-supervised 3D Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning-based 3D medical image segmentation typically demands extensive densely labeled data. Yet, voxel-wise annotation is laborious and costly to obtain. Cross-annotation, which involves annotating only a few slices from different orientations, has recently become an attractive strategy for labeling 3D images. Compared to previous weak labeling methods like bounding boxes and scribbles, it can efficiently preserve the 3D object\u2019s shape and precise boundaries. However, learning from such sparse supervision signals (aka. barely supervised learning (BSL)) still poses great challenges including less fine-grained object perception, less compact class features and inferior generalizability. To this end, we present a Multi-Faceted ConSistency (MF-ConS) learning framework for the BSL scenario. Our approach starts with an active cross-annotation strategy that requires only three orthogonal labeled slices per scan, optimizing the usage of limited annotation budget through a human-in-the-loop process. Building on the popular teacher-student model, MF-ConS is equipped with three types of consistency regularization to tackle the aforementioned challenges of BSL: (i) neighbor-informed object prediction consistency, which improves fine-grained object perception by encouraging the student model to infer complete segmentation from partial visual cues; (ii) non-parametric prototype-driven consistency for more discriminative and compact intra-class features; (iii) a stability constraint under mild perturbations to enhance model\u2019s robustness. 
Our method is evaluated on the task of brain tumor segmentation from T2-FLAIR MRI and the promising results show the superiority of our approach over relevant state-of-the-art methods.", "title":"Few Slices Suffice: Multi-Faceted Consistency Learning with Active Cross-Annotation for Barely-supervised 3D Medical Image Segmentation", "authors":[ "Wu, Xinyao", "Xu, Zhe", "Tong, Raymond Kai-yu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":450 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3260_paper.pdf", "bibtext":"@InProceedings{ Spi_SelfSupervised_MICCAI2024,\n author = { Spieker, Veronika and Eichhorn, Hannah and Stelter, Jonathan K. and Huang, Wenqi and Braren, Rickmer F. and Rueckert, Daniel and Sahli Costabal, Francisco and Hammernik, Kerstin and Prieto, Claudia and Karampinos, Dimitrios C. and Schnabel, Julia A. },\n title = { { Self-Supervised k-Space Regularization for Motion-Resolved Abdominal MRI Using Neural Implicit k-Space Representations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Neural implicit k-space representations have shown promising results for dynamic MRI at high temporal resolutions. Yet, their exclusive training in k-space limits the application of common image regularization methods to improve the final reconstruction. In this work, we introduce the concept of parallel imaging-inspired self-consistency (PISCO), which we incorporate as novel self-supervised k-space regularization enforcing a consistent neighborhood relationship. At no additional data cost, the proposed regularization significantly improves neural implicit k-space reconstructions on simulated data. Abdominal in-vivo reconstructions using PISCO result in enhanced spatio-temporal image quality compared to state-of-the-art methods. Code available at ***.git.", "title":"Self-Supervised k-Space Regularization for Motion-Resolved Abdominal MRI Using Neural Implicit k-Space Representations", "authors":[ "Spieker, Veronika", "Eichhorn, Hannah", "Stelter, Jonathan K.", "Huang, Wenqi", "Braren, Rickmer F.", "Rueckert, Daniel", "Sahli Costabal, Francisco", "Hammernik, Kerstin", "Prieto, Claudia", "Karampinos, Dimitrios C.", "Schnabel, Julia A." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/compai-lab\/2024-miccai-spieker" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":451 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1626_paper.pdf", "bibtext":"@InProceedings{ Zho_MedMLP_MICCAI2024,\n author = { Zhou, Menghan and Xu, Yanyu and Soh, Zhi Da and Fu, Huazhu and Goh, Rick Siow Mong and Cheng, Ching-Yu and Liu, Yong and Zhen, Liangli },\n title = { { MedMLP: An Efficient MLP-like Network for Zero-shot Retinal Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep neural networks (DNNs) have demonstrated superior performance compared to humans across various tasks. However, DNNs often face the challenge of domain shift, where their performance notably deteriorates when applied to medical images with distributions differing from those seen during training. To address this issue and achieve high performance in new target domains under zero-shot settings, we leverage the ability of self-attention mechanisms to capture global dependencies. We introduce a novel MLP-like model designed for superior efficiency and zero-shot robustness. Specifically, we propose an adaptive fully-connected (AdaFC) layer to overcome the fundamental limitation of traditional fully-connected layers in adapting to inputs of various sizes while maintaining GPU efficiency. Building upon AdaFC, we present a new MLP-based network architecture named MedMLP. Through our proposed training pipeline, we achieve a significant 20.1% increase in model testing accuracy on an out-of-distribution dataset, surpassing the widely used ResNet-50 model.", "title":"MedMLP: An Efficient MLP-like Network for Zero-shot Retinal Image Classification", "authors":[ "Zhou, Menghan", "Xu, Yanyu", "Soh, Zhi Da", "Fu, Huazhu", "Goh, Rick Siow Mong", "Cheng, Ching-Yu", "Liu, Yong", "Zhen, Liangli" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":452 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2442_paper.pdf", "bibtext":"@InProceedings{ Li_3DPX_MICCAI2024,\n author = { Li, Xiaoshuang and Meng, Mingyuan and Huang, Zimo and Bi, Lei and Delamare, Eduardo and Feng, Dagan and Sheng, Bin and Kim, Jinman },\n title = { { 3DPX: Progressive 2D-to-3D Oral Image Reconstruction with Hybrid MLP-CNN Networks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Panoramic X-ray (PX) is a prevalent modality in dental practice for its wide availability and low cost. 
However, as a 2D projection image, PX does not contain 3D anatomical information, and therefore has limited use in dental applications that can benefit from 3D information, e.g., tooth angular misalignment detection and classification. Reconstructing 3D structures directly from 2D PX has recently been explored to address limitations with existing methods primarily reliant on Convolutional Neural Networks (CNNs) for direct 2D-to-3D mapping. These methods, however, are unable to correctly infer depth-axis spatial information. In addition, they are limited by the intrinsic locality of convolution operations, as the convolution kernels only capture the information of immediate neighborhood pixels. In this study, we propose a progressive hybrid Multilayer Perceptron (MLP)-CNN pyramid network (3DPX) for 2D-to-3D oral PX reconstruction. We introduce a progressive reconstruction strategy, where 3D images are progressively reconstructed in the 3DPX with guidance imposed on the intermediate reconstruction result at each pyramid level. Further, motivated by the recent advancement of MLPs that show promise in capturing fine-grained long-range dependency, our 3DPX integrates MLPs and CNNs to improve the semantic understanding during reconstruction. Extensive experiments on two large datasets involving 464 studies demonstrate that our 3DPX outperforms state-of-the-art 2D-to-3D oral reconstruction methods, including standalone MLP and transformers, in reconstruction quality, and also improves the performance of downstream angular misalignment classification tasks.", "title":"3DPX: Progressive 2D-to-3D Oral Image Reconstruction with Hybrid MLP-CNN Networks", "authors":[ "Li, Xiaoshuang", "Meng, Mingyuan", "Huang, Zimo", "Bi, Lei", "Delamare, Eduardo", "Feng, Dagan", "Sheng, Bin", "Kim, Jinman" ], "id":"Conference", "arxiv_id":"2408.01292", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":453 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1747_paper.pdf", "bibtext":"@InProceedings{ Ju_AWeaklysupervised_MICCAI2024,\n author = { Ju, Jianguo and Ren, Shumin and Qiu, Dandan and Tu, Huijuan and Yin, Juanjuan and Xu, Pengfei and Guan, Ziyu },\n title = { { A Weakly-supervised Multi-lesion Segmentation Framework Based on Target-level Incomplete Annotations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Effectively segmenting Crohn\u2019s disease (CD) from computed tomography is crucial for clinical use. Given the difficulty of obtaining manual annotations, more and more researchers have begun to pay attention to weakly supervised methods. However, due to the challenges of designing weakly supervised frameworks with limited and complex medical data, most existing frameworks tend to study single-lesion diseases ignoring multi-lesion scenarios. In this paper, we propose a new local-to-global weakly supervised neural framework for effective CD segmentation. Specifically, we develop a novel weak annotation strategy called Target-level Incomplete Annotation (TIA). 
This strategy only annotates one region on each slice as a labeled sample, which significantly relieves the burden of annotation. We observe that the classification networks can discover target regions with more details when replacing the input images with their local views. Taking this into account, we first design a TIA-based affinity cropping network to crop multiple local views with global anatomical information from the global view. Then, we leverage a local classification branch to extract more detailed features from multiple local views. Our framework utilizes a local views-based class distance loss and cross-entropy loss to optimize local and global classification branches to generate high-quality pseudo-labels that can be directly used as supervisory information for the semantic segmentation network. Experimental results show that our framework achieves an average DSC score of 47.8% on the CD71 dataset. Our code is available at https:\/\/github.com\/HeyJGJu\/CD_TIA.", "title":"A Weakly-supervised Multi-lesion Segmentation Framework Based on Target-level Incomplete Annotations", "authors":[ "Ju, Jianguo", "Ren, Shumin", "Qiu, Dandan", "Tu, Huijuan", "Yin, Juanjuan", "Xu, Pengfei", "Guan, Ziyu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/HeyJGJu\/CD_TIA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":454 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0862_paper.pdf", "bibtext":"@InProceedings{ Ren_Selfsupervised_MICCAI2024,\n author = { Ren, Jiaxiang and Li, Zhenghong and Cheng, Wensheng and Zou, Zhilin and Park, Kicheon and Pan, Yingtian and Ling, Haibin },\n title = { { Self-supervised 3D Skeleton Completion for Vascular Structures } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"3D skeleton is critical for analyzing vascular structures with many applications, it is however often limited by the broken skeletons due to image degradation. Existing methods usually correct such skeleton breaks via handcrafted connecting rules or rely on nontrivial manual annotation, which is susceptible to outliers or costly especially for 3D data. In this paper, we propose a self-supervised approach for vasculature reconnection. Specifically, we generate synthetic breaks from confident skeletons and use them to guide the learning of a 3D UNet-like skeleton completion network. To address serious imbalance among different types of skeleton breaks, we introduce three skeleton transformations that largely alleviate such imbalance in synthesized break samples. This allows our model to effectively handle challenging breaks such as bifurcations and tiny fragments. Additionally, to encourage the connectivity outcomes, we design a novel differentiable connectivity loss for further improvement. 
Experiments on a public medical segmentation benchmark and a 3D optical coherence Doppler tomography (ODT) dataset show the effectiveness of our method.", "title":"Self-supervised 3D Skeleton Completion for Vascular Structures", "authors":[ "Ren, Jiaxiang", "Li, Zhenghong", "Cheng, Wensheng", "Zou, Zhilin", "Park, Kicheon", "Pan, Yingtian", "Ling, Haibin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/reckdk\/SkelCompletion-3D" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":455 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0813_paper.pdf", "bibtext":"@InProceedings{ Xu_LGRNet_MICCAI2024,\n author = { Xu, Huihui and Yang, Yijun and Aviles-Rivero, Angelica I and Yang, Guang and Qin, Jing and Zhu, Lei },\n title = { { LGRNet: Local-Global Reciprocal Network for Uterine Fibroid Segmentation in Ultrasound Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Regular screening and early discovery of uterine fibroid are crucial for preventing potential malignant transformations and ensuring timely, life-saving interventions. To this end, we collect and annotate the first ultrasound video dataset with 100 videos for uterine fibroid segmentation (UFUV). \nWe also present Local-Global Reciprocal Network (LGRNet) to efficiently and effectively propagate the long-term temporal context which is crucial to help distinguish between uninformative noisy surrounding tissues and target lesion regions.\nSpecifically, the Cyclic Neighborhood Propagation (CNP) is introduced to propagate the inter-frame local temporal context in a cyclic manner. \nMoreover, to aggregate global temporal context, we first condense each frame into a set of frame bottleneck queries and devise Hilbert Selective Scan (HilbertSS) to both efficiently path connect each frame and preserve the locality bias. A distribute layer is then utilized to disseminate back the global context for reciprocal refinement. \nExtensive experiments on UFUV and three public Video Polyp Segmentation (VPS) datasets demonstrate consistent improvements compared to state-of-the-art segmentation methods, indicating the effectiveness and versatility of LGRNet. 
\nCode, checkpoints, and dataset are available at https:\/\/github.com\/bio-mlhui\/LGRNet", "title":"LGRNet: Local-Global Reciprocal Network for Uterine Fibroid Segmentation in Ultrasound Videos", "authors":[ "Xu, Huihui", "Yang, Yijun", "Aviles-Rivero, Angelica I", "Yang, Guang", "Qin, Jing", "Zhu, Lei" ], "id":"Conference", "arxiv_id":"2407.05703", "GitHub":[ "https:\/\/github.com\/bio-mlhui\/LGRNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":456 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0708_paper.pdf", "bibtext":"@InProceedings{ Oh_Uncertaintyaware_MICCAI2024,\n author = { Oh, Seok-Hwan and Jung, Guil and Kim, Sang-Yun and Kim, Myeong-Gee and Kim, Young-Min and Lee, Hyeon-Jik and Kwon, Hyuk-Sool and Bae, Hyeon-Min },\n title = { { Uncertainty-aware meta-weighted optimization framework for domain-generalized medical image segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate segmentation of echocardiograph images is essential for the diagnosis of cardiovascular diseases. Recent advances in deep learning have opened a possibility for automated cardiac image segmentation. However, the data-driven echocardiography segmentation schemes suffer from domain shift problems, since the ultrasonic image characteristics are largely affected by measurement conditions determined by device and probe specification. In order to overcome this problem, we propose a domain generalization method, utilizing a generative model for data augmentation. An acoustic content and style-aware diffusion probabilistic model is proposed to synthesize echocardiography images of diverse cardiac anatomy and measurement conditions. In addition, a meta-learning-based spatial weighting scheme is introduced to prevent the network from training unreliable pixels of synthetic images, thereby achieving precise image segmentation. 
The proposed framework is thoroughly evaluated using both in-distribution and out-of-distribution echocardiography datasets and demonstrates outstanding performance compared to state-of-the-art methods.", "title":"Uncertainty-aware meta-weighted optimization framework for domain-generalized medical image segmentation", "authors":[ "Oh, Seok-Hwan", "Jung, Guil", "Kim, Sang-Yun", "Kim, Myeong-Gee", "Kim, Young-Min", "Lee, Hyeon-Jik", "Kwon, Hyuk-Sool", "Bae, Hyeon-Min" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Seokhwan-Oh\/MLSW" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":457 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4215_paper.pdf", "bibtext":"@InProceedings{ Kha_DomainAdapt_MICCAI2024,\n author = { Khan, Misaal and Singh, Richa and Vatsa, Mayank and Singh, Kuldeep },\n title = { { DomainAdapt: Leveraging Multitask Learning and Domain Insights for Children\u2019s Nutritional Status Assessment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"This study presents a novel approach for automating nutritional status assessments in children, designed to assist health workers in public health contexts. We introduce DomainAdapt a novel dynamic task-weighing method within a multitask learning framework, which leverages domain knowledge and Mutual Information to balance task-specific losses, enhancing the learning efficiency for nutritional status screening. We have also assembled an unprecedented dataset comprising 16,938 multipose images and anthropometric data from 2,141 children across various settings, marking a significant first in this domain. Through rigorous testing, this method demonstrates superior performance in identifying malnutrition in children and predicting their anthropometric measures compared to existing multitask learning approaches. 
Dataset is available at : iab-rubric.org\/resources\/healthcare-datasets\/anthrovision-dataset", "title":"DomainAdapt: Leveraging Multitask Learning and Domain Insights for Children\u2019s Nutritional Status Assessment", "authors":[ "Khan, Misaal", "Singh, Richa", "Vatsa, Mayank", "Singh, Kuldeep" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":458 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2860_paper.pdf", "bibtext":"@InProceedings{ Cen_ORCGT_MICCAI2024,\n author = { Cen, Min and Wang, Zheng and Zhuang, Zhenfeng and Zhang, Hong and Su, Dan and Bao, Zhen and Wei, Weiwei and Magnier, Baptiste and Yu, Lequan and Wang, Liansheng },\n title = { { ORCGT: Ollivier-Ricci Curvature-based Graph Model for Lung STAS Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Tumor Spread Through Air Spaces (STAS), identified as a mechanism of invasion, has been substantiated by multiple studies to be associated with lower survival rates, underscoring its significant prognostic implications. In clinical practice, pathological diagnosis is regarded as the gold standard for STAS examination. Nonetheless, manual STAS diagnosis is characterized by labor-intensive and time-consuming processes, which are susceptible to misdiagnosis. In this paper, we attempt for the first time to identify the underlying features from histopathological images for the automatic prediction of STAS. Existing deep learning-based methods usually produce undesirable predictive performance with poor interpretability for this task, as they fail to identify small tumor cells spread around the main tumor and their complex correlations. To address these issues, we propose a novel Ollivier-Ricci Curvature-based Graph model for STAS prediction (ORCGT), which utilizes the information from the major tumor margin to improve both the accuracy and interpretability. The model first extracts the major tumor margin by a tumor density map with minimal and coarse annotations, which enhances the visibility of small tumor regions to the model. Then, we develop a Pool-Refined Ollivier-Ricci Curvature-based module to enable complex interactions between patches regardless of long distances and reduce the negative impact of the over-squashing phenomenon among patches linked by negative curvature edges. 
Extensive experiments conducted on our collected dataset demonstrate the effectiveness and interpretability of the proposed approach for predicting lung STAS.", "title":"ORCGT: Ollivier-Ricci Curvature-based Graph Model for Lung STAS Prediction", "authors":[ "Cen, Min", "Wang, Zheng", "Zhuang, Zhenfeng", "Zhang, Hong", "Su, Dan", "Bao, Zhen", "Wei, Weiwei", "Magnier, Baptiste", "Yu, Lequan", "Wang, Liansheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zhengwang9\/ORCGT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":459 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1279_paper.pdf", "bibtext":"@InProceedings{ Hua_AReferandGround_MICCAI2024,\n author = { Huang, Xiaoshuang and Huang, Haifeng and Shen, Lingdong and Yang, Yehui and Shang, Fangxin and Liu, Junwei and Liu, Jia },\n title = { { A Refer-and-Ground Multimodal Large Language Model for Biomedicine } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"With the rapid development of multimodal large language models (MLLMs), especially their capabilities in visual chat through refer and ground functionalities, their significance is increasingly recognized. However, the biomedical field currently exhibits a substantial gap in this area, primarily due to the absence of a dedicated refer and ground dataset for biomedical images. To address this challenge, we devised the Med-GRIT-270k dataset. It comprises 270k question-and-answer pairs and spans eight distinct medical imaging modalities. Most importantly, it is the first dedicated to the biomedical domain and integrating refer and ground conversations. The key idea is to sample large-scale biomedical image-mask pairs from medical segmentation datasets and generate instruction datasets from text using chatGPT. Additionally, we introduce a Refer-and-GrounD Multimodal Large Language Model for Biomedicine (BiRD) by using this dataset and multi-task instruction learning. Extensive experiments have corroborated the efficacy of the Med-GRIT-270k dataset and the multi-modal, fine-grained interactive capabilities of the BiRD model. This holds significant reference value for the exploration and development of intelligent biomedical assistants. 
The repository is at https:\/\/github.com\/ShawnHuang497\/BiRD", "title":"A Refer-and-Ground Multimodal Large Language Model for Biomedicine", "authors":[ "Huang, Xiaoshuang", "Huang, Haifeng", "Shen, Lingdong", "Yang, Yehui", "Shang, Fangxin", "Liu, Junwei", "Liu, Jia" ], "id":"Conference", "arxiv_id":"2406.18146", "GitHub":[ "https:\/\/github.com\/ShawnHuang497\/BiRD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":460 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2429_paper.pdf", "bibtext":"@InProceedings{ Du_DistributionallyAdaptive_MICCAI2024,\n author = { Du, Jing and Dong, Guangwei and Ma, Congbo and Xue, Shan and Wu, Jia and Yang, Jian and Beheshti, Amin and Sheng, Quan Z. and Giral, Alexis },\n title = { { Distributionally-Adaptive Variational Meta Learning for Brain Graph Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent developments in Graph Neural Networks~(GNNs) have shed light on understanding brain networks through innovative approaches. Despite these innovations, the significant costs associated with data collection and the challenges posed by data drift in real-world scenarios present substantial hurdles for models dependent on large datasets to capture brain activity features. \nTo address these issues, we introduce the Distributionally-Adaptive Variational Meta Learning (DAML) framework, \ndesigned to equip the model with rapid adaptability to varying distributions by meta-learning-driven minimization of discrepancies between subject sets. Initially, we employ a graph encoder with the message-passing strategy to generate precise brain graph representations. Subsequently, we implement a distributionally-adaptive variational meta learning approach to functionally simulate data drift across subject sets, utilizing variational layers for parameterization and adaptive alignment methods to reduce discrepancies. 
Through comprehensive experiments on three real-world datasets with both few-shot and standard settings against various baselines, our DAML model demonstrates the state-of-the-art performance across all metrics, underscoring its efficiency and potential within limited data.", "title":"Distributionally-Adaptive Variational Meta Learning for Brain Graph Classification", "authors":[ "Du, Jing", "Dong, Guangwei", "Ma, Congbo", "Xue, Shan", "Wu, Jia", "Yang, Jian", "Beheshti, Amin", "Sheng, Quan Z.", "Giral, Alexis" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":461 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2266_paper.pdf", "bibtext":"@InProceedings{ Alh_FedMedICL_MICCAI2024,\n author = { Alhamoud, Kumail and Ghunaim, Yasir and Alfarra, Motasem and Hartvigsen, Thomas and Torr, Philip and Ghanem, Bernard and Bibi, Adel and Ghassemi, Marzyeh },\n title = { { FedMedICL: Towards Holistic Evaluation of Distribution Shifts in Federated Medical Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"For medical imaging AI models to be clinically impactful, they must generalize. However, this goal is hindered by \\emph{(i)} diverse types of distribution shifts, such as temporal, demographic, and label shifts, and \\emph{(ii)} limited diversity in datasets that are siloed within single medical institutions. While these limitations have spurred interest in federated learning, current evaluation benchmarks fail to evaluate different shifts simultaneously. However, in real healthcare settings, multiple types of shifts co-exist, yet their impact on medical imaging performance remains unstudied. In response, we introduce FedMedICL, a unified framework and benchmark to holistically evaluate federated medical imaging challenges, simultaneously capturing label, demographic, and temporal distribution shifts. We comprehensively evaluate several popular methods on six diverse medical imaging datasets (totaling 550 GPU hours). Furthermore, we use FedMedICL to simulate COVID-19 propagation across hospitals and evaluate whether methods can adapt to pandemic changes in disease prevalence. We find that a simple batch balancing technique surpasses advanced methods in average performance across FedMedICL experiments. This finding questions the applicability of results from previous, narrow benchmarks in real-world medical settings. 
Code is available at: \\url{https:\/\/github.com\/m1k2zoo\/FedMedICL}.", "title":"FedMedICL: Towards Holistic Evaluation of Distribution Shifts in Federated Medical Imaging", "authors":[ "Alhamoud, Kumail", "Ghunaim, Yasir", "Alfarra, Motasem", "Hartvigsen, Thomas", "Torr, Philip", "Ghanem, Bernard", "Bibi, Adel", "Ghassemi, Marzyeh" ], "id":"Conference", "arxiv_id":"2407.08822", "GitHub":[ "https:\/\/github.com\/m1k2zoo\/FedMedICL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":462 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3736_paper.pdf", "bibtext":"@InProceedings{ Li_Development_MICCAI2024,\n author = { Li, Guoshi and Thung, Kim-Han and Taylor, Hoyt and Wu, Zhengwang and Li, Gang and Wang, Li and Lin, Weili and Ahmad, Sahar and Yap, Pew-Thian },\n title = { { Development of Effective Connectome from Infancy to Adolescence } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Delineating the normative developmental profile of functional connectome is important for both standardized assessment of individual growth and early detection of diseases. However, functional connectome has been mostly studied using functional connectivity (FC), where undirected connectivity strengths are estimated from statistical correlation of resting-state functional MRI (rs-fMRI) signals. To address this limitation, we applied regression dynamic causal modeling (rDCM) to delineate the developmental trajectories of effective connectivity (EC), the directed causal influence among neuronal populations, in whole-brain networks from infancy to adolescence (0-21 years old) based on high-quality rs-fMRI data from Baby Connectome Project (BCP) and Human Connectome Project Development (HCPD). Analysis with linear mixed model demonstrates significant age effect on the mean nodal EC which is best fit by a \u201cU\u201d shaped quadratic curve with minimal EC at around 2 years old. Further analysis indicates that five brain regions including the left and right cuneus, left precuneus, left supramarginal gyrus and right inferior temporal gyrus have the most significant age effect on nodal EC (p < 0.05, FDR corrected). Moreover, the frontoparietal control (FPC) network shows the fastest increase from early childhood (1-2 years) to adolescence (6-21 years) followed by the visual and salience networks. 
Our findings suggest complex nonlinear developmental profile of effective connectivity from infancy to adolescence, which may reflect dynamic structural and functional maturation during this critical growth period.", "title":"Development of Effective Connectome from Infancy to Adolescence", "authors":[ "Li, Guoshi", "Thung, Kim-Han", "Taylor, Hoyt", "Wu, Zhengwang", "Li, Gang", "Wang, Li", "Lin, Weili", "Ahmad, Sahar", "Yap, Pew-Thian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":463 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2720_paper.pdf", "bibtext":"@InProceedings{ Li_MultiFrequency_MICCAI2024,\n author = { Li, Hao and Zhai, Xiangyu and Xue, Jie and Gu, Changming and Tian, Baolong and Hong, Tingxuan and Jin, Bin and Li, Dengwang and Huang, Pu },\n title = { { Multi-Frequency and Smoke Attention-aware Learning based Diffusion Model for Removing Surgical Smoke } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Surgical smoke in laparoscopic surgery can deteriorate the visibility and pose hazards to surgeons, although medical devices for mechanical smoke evacuation worked well, it prolonged operative duration and thus restricted the efficiency. This work aims to simultaneously remove the surgical smoke and restore the true-to-live image colors with deep learning strategy to improve the surgical efficiency and safety. However, the deep network-based smoke removal remains a challenge due to: 1) higher frequency modes are hindered from being learned by spectral bias, 2) the distribution of surgical smoke is non-homogeneity. We propose the multi-frequency and smoke attention-aware learning-based diffusion model for removing surgical smoke. In this work, the frequency compensation strategy combines the multi-level frequency learning and contrast enhancement to integrates comprehensive features for learning mid-to-high frequency details that the smoke has obscured. The smoke attention learning employs the pixel-wise measurement and provides the diffusion model with complementary features about where smoke is present, which helps restore the smokeless regions during the inverse diffusion process. And the multi-task learning strategy incorporates L1 loss, smoke perception loss, dark channel prior loss, and contrast enhancement loss to help the model optimization. Additionally, a paired smokeless\/smoky dataset is simulated by a 3D smoke rendering engine. 
The experimental results show that the proposed method outperforms other state-of-the-art methods on both synthetic\/real laparoscopic surgical images, with the potential to be embedded in laparoscopic devices for smoke removal.", "title":"Multi-Frequency and Smoke Attention-aware Learning based Diffusion Model for Removing Surgical Smoke", "authors":[ "Li, Hao", "Zhai, Xiangyu", "Xue, Jie", "Gu, Changming", "Tian, Baolong", "Hong, Tingxuan", "Jin, Bin", "Li, Dengwang", "Huang, Pu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":464 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3328_paper.pdf", "bibtext":"@InProceedings{ Pol_Letting_MICCAI2024,\n author = { Poles, Isabella and Santambrogio, Marco D. and D\u2019Arnese, Eleonora },\n title = { { Letting Osteocytes Teach SR-microCT Bone Lacunae Segmentation: A Feature Variation Distillation Method via Diffusion Denoising } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Synchrotron Radiation micro-Computed Tomography (SR-microCT) is a promising imaging technique for osteocyte-lacunar bone pathophysiology study. However, acquiring them costs more than histopathology, thus requiring multi-modal approaches to enrich limited\/costly data with complementary information. Nevertheless, paired modalities are rarely available in clinical settings. To overcome these problems, we present a novel histopathology-enhanced disease-aware distillation model for bone microstructure segmentation from SR-microCTs. Our method uses unpaired histopathology images to emphasize lacunae morphology during SR-microCT image training while avoiding the need for histopathologies during testing. Specifically, we leverage denoising diffusion to eliminate the noisy information within the student and distill valuable information effectively. On top of this, a feature variation distillation method pushes the student to learn intra-class semantic variations similar to the teacher, improving label co-occurrence information learning. 
Experimental results on clinical and public microscopy datasets demonstrate superior performance over single-, multi-modal and state-of-the-art distillation methods for image segmentation.", "title":"Letting Osteocytes Teach SR-microCT Bone Lacunae Segmentation: A Feature Variation Distillation Method via Diffusion Denoising", "authors":[ "Poles, Isabella", "Santambrogio, Marco D.", "D\u2019Arnese, Eleonora" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/isabellapoles\/LOTUS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":465 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2322_paper.pdf", "bibtext":"@InProceedings{ Din_AWasserstein_MICCAI2024,\n author = { Ding, Jiaqi and Dan, Tingting and Wei, Ziquan and Laurienti, Paul and Wu, Guorong },\n title = { { A Wasserstein Recipe for Replicable Machine Learning on Functional Neuroimages } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Advances in neuroimaging have dramatically expanded our ability to probe the neurobiological bases of behavior in-vivo. Leveraging a growing repository of publicly available neuroimaging data, there is a surging interest for utilizing machine learning approaches to explore new questions in neuroscience. Despite the impressive achievements of current deep learning models, there remains an under-acknowledged risk: the variability in cognitive states may undermine the experimental replicability of the ML models, leading to potentially misleading findings in the realm of neuroscience. To address this challenge, we first dissect the critical (but often missed) challenge of ensuring the replicability of predictions despite task-irrelevant functional fluctuations. We then formulate the solution as a domain adaptation, where we design a dual-branch Transformer with minimizing Wasserstein distance. We evaluate the cognitive task recognition accuracy and consistency of test and retest functional neuroimages (serial imaging measures of the same cognitive task over a short period of time) of the Human Connectome Project. 
Our model demonstrates significant improvements in both replicability and accuracy of task recognition, showing the great potential of reliable deep models for solving real-world neuroscience problems.", "title":"A Wasserstein Recipe for Replicable Machine Learning on Functional Neuroimages", "authors":[ "Ding, Jiaqi", "Dan, Tingting", "Wei, Ziquan", "Laurienti, Paul", "Wu, Guorong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":466 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2142_paper.pdf", "bibtext":"@InProceedings{ Gao_Evidential_MICCAI2024,\n author = { Gao, Yibo and Gao, Zheyao and Gao, Xin and Liu, Yuanye and Wang, Bomin and Zhuang, Xiahai },\n title = { { Evidential Concept Embedding Models: Towards Reliable Concept Explanations for Skin Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Due to the high stakes in medical decision-making, there is a compelling demand for interpretable deep learning methods in medical image analysis. Concept Bottleneck Models (CBM) have emerged as an active interpretable framework incorporating human-interpretable concepts into decision-making. However, their concept predictions may lack reliability when applied to clinical diagnosis, impeding concept explanations\u2019 quality. To address this, we propose an evidential concept embedding model (evi-CEM), which employs evidential learning to model the concept uncertainty. Additionally, we offer to leverage the concept uncertainty to rectify concept misalignments that arise when training CBMs using vision-language models without complete concept supervision. With the proposed methods, we can enhance concept explanations\u2019 reliability for both supervised and label-efficient settings. Furthermore, we introduce concept uncertainty for effective test-time intervention. Our evaluation demonstrates that evi-CEM achieves superior performance in terms of concept prediction, and the proposed concept rectification effectively mitigates concept misalignments for label-efficient training. Our code is available at https:\/\/github.com\/obiyoag\/evi-CEM.", "title":"Evidential Concept Embedding Models: Towards Reliable Concept Explanations for Skin Disease Diagnosis", "authors":[ "Gao, Yibo", "Gao, Zheyao", "Gao, Xin", "Liu, Yuanye", "Wang, Bomin", "Zhuang, Xiahai" ], "id":"Conference", "arxiv_id":"2406.19130", "GitHub":[ "https:\/\/github.com\/obiyoag\/evi-CEM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":467 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3895_paper.pdf", "bibtext":"@InProceedings{ Cho_AdaCBM_MICCAI2024,\n author = { Chowdhury, Townim F. and Phan, Vu Minh Hieu and Liao, Kewen and To, Minh-Son and Xie, Yutong and van den Hengel, Anton and Verjans, Johan W. 
and Liao, Zhibin },\n title = { { AdaCBM: An Adaptive Concept Bottleneck Model for Explainable and Accurate Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"The integration of vision-language models such as CLIP and Concept Bottleneck Models (CBMs) offers a promising approach to explaining deep neural network (DNN) decisions using concepts understandable by humans, addressing the black-box concern of DNNs. While CLIP provides both explainability and zero-shot classification capability, its pre-training on generic image and text data may limit its classification accuracy and applicability to medical image diagnostic tasks, creating a transfer learning problem. To maintain explainability and address transfer learning needs, CBM methods commonly design post-processing modules after the bottleneck module. However, this way has been ineffective. This paper takes an unconventional approach by re-examining the CBM framework through the lens of its geometrical representation as a simple linear classification system. The analysis uncovers that post-CBM fine-tuning modules merely rescale and shift the classification outcome of the system, failing to fully leverage the system\u2019s learning potential. We introduce an adaptive module strategically positioned between CLIP and CBM to bridge the gap between source and downstream domains. This simple yet effective approach enhances classification performance while preserving the explainability afforded by the framework. Our work offers a comprehensive solution that encompasses the entire process, from concept discovery to model training, providing a holistic recipe for leveraging the strengths of GPT, CLIP, and CBM.", "title":"AdaCBM: An Adaptive Concept Bottleneck Model for Explainable and Accurate Diagnosis", "authors":[ "Chowdhury, Townim F.", "Phan, Vu Minh Hieu", "Liao, Kewen", "To, Minh-Son", "Xie, Yutong", "van den Hengel, Anton", "Verjans, Johan W.", "Liao, Zhibin" ], "id":"Conference", "arxiv_id":"2408.02001", "GitHub":[ "https:\/\/github.com\/AIML-MED\/AdaCBM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":468 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0719_paper.pdf", "bibtext":"@InProceedings{ Che_Selfsupervised_MICCAI2024,\n author = { Chen, Dongdong and Yao, Linlin and Liu, Mengjun and Shen, Zhenrong and Hu, Yuqi and Song, Zhiyun and Wang, Qian and Zhang, Lichi },\n title = { { Self-supervised Learning with Adaptive Graph Structure and Function Representation For Cross-Dataset Brain Disorder Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Resting-state functional magnetic resonance imaging (rs-fMRI) helps characterize the regional neural activity of the human brain. Currently, supervised deep learning methods that rely on a large amount of fMRI data have shown good performance in diagnosing specific brain diseases. 
However, there are significant differences in the structure and function of brain connectivity networks among patients with different brain diseases. This makes it difficult for the model to achieve satisfactory diagnostic performance when facing new diseases with limited data, thus severely hindering their application in clinical practice. In this work, we propose a self-supervised learning framework based on graph contrastive learning for cross-dataset brain disorder diagnosis. Specifically, we develop a graph structure learner that adaptively characterizes general brain connectivity networks for various brain disorders. We further develop a multi-state brain network encoder that can effectively enhance the representation of brain networks with functional information related to different brain diseases. We finally evaluate our model on different brain disorders and demonstrate advantages compared to other state-of-the-art methods.", "title":"Self-supervised Learning with Adaptive Graph Structure and Function Representation For Cross-Dataset Brain Disorder Diagnosis", "authors":[ "Chen, Dongdong", "Yao, Linlin", "Liu, Mengjun", "Shen, Zhenrong", "Hu, Yuqi", "Song, Zhiyun", "Wang, Qian", "Zhang, Lichi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":469 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0870_paper.pdf", "bibtext":"@InProceedings{ Mis_STANLOC_MICCAI2024,\n author = { Mishra, Divyanshu and Saha, Pramit and Zhao, He and Patey, Olga and Papageorghiou, Aris T. and Noble, J. Alison },\n title = { { STAN-LOC: Visual Query-based Video Clip Localization for Fetal Ultrasound Sweep Videos } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Detecting standard frame clips in fetal ultrasound videos is crucial for accurate clinical assessment and diagnosis. It enables healthcare professionals to evaluate fetal development, identify abnormalities, and monitor overall health with clarity and standardization. To augment sonographer workflow and to detect standard frame clips, we introduce the task of Visual Query-based Video Clip Localization in medical video understanding. It aims to retrieve a video clip from a given ultrasound sweep that contains frames similar to a given exemplar frame of the required standard anatomical view. To solve the task, we propose STAN-LOC that consists of three main components: (a) a Query-Aware Spatio-Temporal Fusion Transformer that fuses information available in the visual query with the input video. This results in visual query-aware video features which we model temporally to understand spatio-temporal relationship between them. (b) a Multi-Anchor, View-Aware Contrastive loss to reduce the influence of inherent noise in manual annotations especially at event boundaries and in videos featuring highly similar objects. (c) a query selection algorithm during inference that selects the best visual query for a given video to reduce model\u2019s sensitivity to the quality of visual queries. 
We apply STAN-LOC to the task of detecting standard-frame clips in fetal ultrasound heart sweeps given four-chamber view queries. Additionally, we assess the performance of our best model on PULSE [2] data for retrieving standard transventricular plane (TVP) in fetal head videos. STAN-LOC surpasses the state-of-the-art method by 22% in mtIoU. The code will be available upon acceptance at xxx.github.com.", "title":"STAN-LOC: Visual Query-based Video Clip Localization for Fetal Ultrasound Sweep Videos", "authors":[ "Mishra, Divyanshu", "Saha, Pramit", "Zhao, He", "Patey, Olga", "Papageorghiou, Aris T.", "Noble, J. Alison" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":470 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4090_paper.pdf", "bibtext":"@InProceedings{ Ren_SkinCON_MICCAI2024,\n author = { Ren, Zhihang and Li, Yunqi and Li, Xinyu and Xie, Xinrong and Duhaime, Erik P. and Fang, Kathy and Chakraborty, Tapabrata and Guo, Yunhui and Yu, Stella X. and Whitney, David },\n title = { { SkinCON: Towards consensus for the uncertainty of skin cancer sub-typing through distribution regularized adaptive predictive sets (DRAPS) } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning has been widely utilized in medical diagnosis. Convolutional neural networks and transformers can achieve high predictive accuracy, which can be on par with or even exceed human performance. However, uncertainty quantification remains an unresolved issue, impeding the deployment of deep learning models in practical settings. Conformal analysis can, in principle, estimate the uncertainty of each diagnostic prediction, but doing so effectively requires extensive human annotations to characterize the underlying empirical distributions. This has been challenging in the past because instance-level class distribution data has been unavailable: Collecting massive ground truth labels is already challenging, and obtaining the class distribution of each instance is even more difficult. Here, we provide a large skin cancer instance-level class distribution dataset, SkinCON, that contains 25,331 skin cancer images from the ISIC 2019 challenge dataset. SkinCON is built upon over 937,167 diagnostic judgments from 10,509 participants. Using SkinCON, we propose the distribution regularized adaptive predictive sets (DRAPS) method for skin cancer diagnosis. We also provide a new evaluation metric based on SkinCON. Experiment results show the quality of our proposed DRAPS method and the uncertainty variation with respect to patient age and sex from health equity and fairness perspective. 
The dataset and code are available at https:\/\/skincon.github.io.", "title":"SkinCON: Towards consensus for the uncertainty of skin cancer sub-typing through distribution regularized adaptive predictive sets (DRAPS)", "authors":[ "Ren, Zhihang", "Li, Yunqi", "Li, Xinyu", "Xie, Xinrong", "Duhaime, Erik P.", "Fang, Kathy", "Chakraborty, Tapabrata", "Guo, Yunhui", "Yu, Stella X.", "Whitney, David" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":471 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1236_paper.pdf", "bibtext":"@InProceedings{ Xio_Contrast_MICCAI2024,\n author = { Xiong, Honglin and Fang, Yu and Sun, Kaicong and Wang, Yulin and Zong, Xiaopeng and Zhang, Weijun and Wang, Qian },\n title = { { Contrast Representation Learning from Imaging Parameters for Magnetic Resonance Image Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Magnetic Resonance Imaging (MRI) is a widely used noninvasive medical imaging technique that provides excellent contrast for soft tissues, making it invaluable for diagnosis and intervention. Acquiring multiple contrast images is often desirable for comprehensive evaluation and precise disease diagnosis. However, due to technical limitations, patient-related issues, and medical conditions, obtaining all desired MRI contrasts is not always feasible. Cross-contrast MRI synthesis can potentially address this challenge by generating target contrasts based on existing source contrasts. In this work, we propose Contrast Representation Learning (CRL), which explores the changes in MRI contrast by modifying MR sequences. Unlike generative models that treat image generation as an end-to-end cross-domain mapping, CRL aims to uncover the complex relationships between contrasts by embracing the interplay of imaging parameters within this space. By doing so, CRL enhances the fidelity and realism of synthesized MR images, providing a more accurate representation of intricate details. Experimental results on the Fast Spin Echo (FSE) sequence demonstrate the promising performance and generalization capability of CRL, even with limited training data. Moreover, CRL introduces a perspective of considering imaging parameters as implicit coordinates, shedding light on the underlying structure governing contrast variation in MR images. 
Our code is available at\nhttps:\/\/github.com\/xionghonglin\/CRL_MICCAI_2024.", "title":"Contrast Representation Learning from Imaging Parameters for Magnetic Resonance Image Synthesis", "authors":[ "Xiong, Honglin", "Fang, Yu", "Sun, Kaicong", "Wang, Yulin", "Zong, Xiaopeng", "Zhang, Weijun", "Wang, Qian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xionghonglin\/CRL_MICCAI_2024" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":472 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1006_paper.pdf", "bibtext":"@InProceedings{ She_Spatiotemporal_MICCAI2024,\n author = { Shen, Chengzhi and Menten, Martin J. and Bogunovi\u0107, Hrvoje and Schmidt-Erfurth, Ursula and Scholl, Hendrik P. N. and Sivaprasad, Sobha and Lotery, Andrew and Rueckert, Daniel and Hager, Paul and Holland, Robbie },\n title = { { Spatiotemporal Representation Learning for Short and Long Medical Image Time Series } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Analyzing temporal developments is crucial for the accurate prognosis of many medical conditions. Temporal changes that occur over short time scales are key to assessing the health of physiological functions, such as the cardiac cycle. Moreover, tracking longer term developments that occur over months or years in evolving processes, such as age-related macular degeneration (AMD), is essential for accurate prognosis. Despite the importance of both short and long term analysis to clinical decision making, they remain understudied in medical deep learning. State of the art methods for spatiotemporal representation learning, developed for short natural videos, prioritize the detection of temporal constants rather than temporal developments. Moreover, they do not account for varying time intervals between acquisitions, which are essential for contextualizing observed changes. To address these issues, we propose two approaches. First, we combine clip-level contrastive learning with a novel temporal embedding to adapt to irregular time series. Second, we propose masking and predicting latent frame representations of the temporal sequence. Our two approaches outperform all prior methods on temporally-dependent tasks including cardiac output estimation and three prognostic AMD tasks. Overall, this enables the automated analysis of temporal patterns which are typically overlooked in applications of deep learning to medicine.", "title":"Spatiotemporal Representation Learning for Short and Long Medical Image Time Series", "authors":[ "Shen, Chengzhi", "Menten, Martin J.", "Bogunovi\u0107, Hrvoje", "Schmidt-Erfurth, Ursula", "Scholl, Hendrik P. 
N.", "Sivaprasad, Sobha", "Lotery, Andrew", "Rueckert, Daniel", "Hager, Paul", "Holland, Robbie" ], "id":"Conference", "arxiv_id":"2403.07513", "GitHub":[ "https:\/\/github.com\/Leooo-Shen\/tvrl" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":473 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0213_paper.pdf", "bibtext":"@InProceedings{ Li_Surfacebased_MICCAI2024,\n author = { Li, Yuan and Nie, Xinyu and Zhang, Jianwei and Shi, Yonggang },\n title = { { Surface-based and Shape-informed U-fiber Atlasing for Robust Superficial White Matter Connectivity Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Superficial white matter (SWM) U-fibers contain consider able structural connectivity in the human brain; however, related studies are not well-developed compared to the well-studied deep white matter (DWM). Conventionally, SWM U-fiber is obtained through DWM tracking, which is inaccurate on the cortical surface. The significant variability in the cortical folding patterns of the human brain renders a conventional template-based atlas unsuitable for accurately mapping U-fibers within the thin layer of SWM beneath the cortical surface. Recently, new surface-based tracking methods have been developed to reconstruct more complete and reliable U-fibers. To leverage surface-based U-fiber tracking methods, we propose to create a surface-based U-fiber dictionary using high-resolution diffusion MRI (dMRI) data from the Human Connectome Project (HCP). We first identify the major U-fiber bundles and then build a dictionary containing subjects with high groupwise consistency of major U-fiber bundles. Finally, we propose a shape-informed U-fiber atlasing method for robust SWM connectivity analysis. Through experiments, we demonstrate that our shape-informed atlasing method can obtain anatomically more accurate U-fiber representations than state of-the-art atlas. Additionally, our method is capable of restoring incomplete U-fibers in low-resolution dMRI, thus helping better characterize SWM connectivity in clinical studies such as the Alzheimer\u2019s Disease Neuroimaging Initiative (ADNI).", "title":"Surface-based and Shape-informed U-fiber Atlasing for Robust Superficial White Matter Connectivity Analysis", "authors":[ "Li, Yuan", "Nie, Xinyu", "Zhang, Jianwei", "Shi, Yonggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":474 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3454_paper.pdf", "bibtext":"@InProceedings{ Bap_Keypoint_MICCAI2024,\n author = { Baptista, T\u00e2nia and Raposo, Carolina and Marques, Miguel and Antunes, Michel and Barreto, Joao P. 
},\n title = { { Keypoint Matching for Instrument-Free 3D Registration in Video-based Surgical Navigation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Video-based Surgical Navigation (VBSN) inside the articular joint using an arthroscopic camera has proven to have important clinical benefits in arthroscopy. It works by referencing the anatomy and instruments with respect to the system of coordinates of a fiducial marker that is rigidly attached to the bone. In order to overlay surgical plans on the anatomy, VBSN performs registration of a pre-operative model with intra-operative data, which is acquired by means of an instrumented touch probe for surface reconstruction. The downside is that this procedure is typically time-consuming and may cause iatrogenic damage to the anatomy. Performing anatomy reconstruction by using solely the arthroscopic video overcomes these problems but raises new ones, namely the difficulty in accomplishing keypoint detection and matching in bone and cartilage regions that are often very low textured. This paper presents a thorough analysis of the performance of classical and learning-based approaches for keypoint matching in arthroscopic images acquired in the knee joint. It is demonstrated that by employing learning-based methods in such imagery, it becomes possible, for the first time, to perform registration in the context of VBSN without the aid of any instruments, i.e., in an instrument-free manner.", "title":"Keypoint Matching for Instrument-Free 3D Registration in Video-based Surgical Navigation", "authors":[ "Baptista, T\u00e2nia", "Raposo, Carolina", "Marques, Miguel", "Antunes, Michel", "Barreto, Joao P." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":475 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2573_paper.pdf", "bibtext":"@InProceedings{ Yan_Spatial_MICCAI2024,\n author = { Yang, Yan and Hossain, Md Zakir and Li, Xuesong and Rahman, Shafin and Stone, Eric },\n title = { { Spatial Transcriptomics Analysis of Zero-shot Gene Expression Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Spatial transcriptomics (ST) captures gene expression in fine-grained distinct regions (\\ie, windows) of a tissue slide. Traditional supervised learning frameworks applied to model ST are constrained to predicting expression of gene types seen during training from slide image windows, failing to generalize to unseen gene types. To overcome this limitation, we propose a semantic guided network, a pioneering zero-shot gene expression prediction framework. Considering a gene type can be described by functionality and phenotype, we dynamically embed a gene type to a vector per its functionality and phenotype, and employ this vector to project slide image windows to gene expression in feature space, unleashing zero-shot expression prediction for unseen gene types. 
The gene type functionality and phenotype are queried with a carefully designed prompt from a pre-trained large language model. On standard benchmark datasets, we demonstrate competitive zero-shot performance compared to past state-of-the-art supervised learning approaches. Our code is available at \\url{https:\/\/github.com\/Yan98\/SGN}.", "title":"Spatial Transcriptomics Analysis of Zero-shot Gene Expression Prediction", "authors":[ "Yang, Yan", "Hossain, Md Zakir", "Li, Xuesong", "Rahman, Shafin", "Stone, Eric" ], "id":"Conference", "arxiv_id":"2401.14772", "GitHub":[ "https:\/\/github.com\/Yan98\/SGN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":476 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0926_paper.pdf", "bibtext":"@InProceedings{ Gho_MammoCLIP_MICCAI2024,\n author = { Ghosh, Shantanu and Poynton, Clare B. and Visweswaran, Shyam and Batmanghelich, Kayhan },\n title = { { Mammo-CLIP: A Vision Language Foundation Model to Enhance Data Efficiency and Robustness in Mammography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"The lack of large and diverse training data on Computer-Aided Diagnosis (CAD) in breast cancer detection has been one of the concerns that impedes the adoption of the system. \nRecently, pre-training with large-scale image text datasets via Vision-Language models (VLM) (\\eg CLIP) partially addresses the issue of robustness and data efficiency in computer vision (CV). \nThis paper proposes Mammo-CLIP, the first VLM pre-trained on a substantial amount of screening mammogram-report pairs, addressing the challenges of dataset diversity and size. Our experiments on two public datasets demonstrate strong performance in classifying and localizing various mammographic attributes crucial for breast cancer detection, showcasing data efficiency and robustness similar to CLIP in CV. We also propose Mammo-FActOR, a novel feature attribution method, to provide spatial interpretation of representation with sentence-level granularity within mammography reports. 
Code is available publicly\\footnote{We will release the model checkpoints upon decision}: \\url{https:\/\/github.com\/annonymous-vision\/miccai}.", "title":"Mammo-CLIP: A Vision Language Foundation Model to Enhance Data Efficiency and Robustness in Mammography", "authors":[ "Ghosh, Shantanu", "Poynton, Clare B.", "Visweswaran, Shyam", "Batmanghelich, Kayhan" ], "id":"Conference", "arxiv_id":"2405.12255", "GitHub":[ "https:\/\/github.com\/batmanlab\/Mammo-CLIP" ], "paper_page":"https:\/\/huggingface.co\/papers\/2405.12255", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":4, "Models":[ "shawn24\/Mammo-CLIP" ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ "shawn24\/Mammo-CLIP" ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":477 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2046_paper.pdf", "bibtext":"@InProceedings{ Xio_Multimodality_MICCAI2024,\n author = { Xiong, Zicheng and Zhao, Kai and Ji, Like and Shu, Xujun and Long, Dazhi and Chen, Shengbo and Yang, Fuxing },\n title = { { Multi-modality 3D CNN Transformer for Assisting Clinical Decision in Intracerebral Hemorrhage } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Intracerebral hemorrhage (ICH) is a cerebrovascular disease with high mortality and morbidity rates. Early-stage ICH patients often lack clear surgical indications, which is quite challenging for neurosurgeons to make treatment decisions. Currently, early treatment decisions for ICH primarily rely on the clinical experience of neurosurgeons. Although there have been attempts to combine local CT imaging with clinical data for decision-making, these approaches fail to provide deep semantic analysis and do not fully leverage the synergistic effects between different modalities. To address this issue, this paper introduces a novel multi-modality predictive model that combines CT images and clinical data to provide reliable treatment decisions for ICH patients. Specifically, this model employs a combination of 3D CNN and Transformer to analyze patients\u2019 brain CT scans, effectively capturing the 3D spatial information of intracranial hematomas and surrounding brain tissue. In addition, it utilizes a contrastive language-image pre-training (CLIP) module to extract demographic features and important clinical data and integrates with CT imaging data through a cross-attention mechanism. Furthermore, a novel CNN-based multilayer perceptron (MLP) layer is designed to enhance the understanding of the 3D spatial features. Extensive experiments conducted on real clinical datasets demonstrate that the proposed method significantly improves the accuracy of treatment decisions compared to existing state-of-the-art methods. 
Code is available at https:\/\/github.com\/Henry-Xiong\/3DCT-ICH.", "title":"Multi-modality 3D CNN Transformer for Assisting Clinical Decision in Intracerebral Hemorrhage", "authors":[ "Xiong, Zicheng", "Zhao, Kai", "Ji, Like", "Shu, Xujun", "Long, Dazhi", "Chen, Shengbo", "Yang, Fuxing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Henry-Xiong\/3DCT-ICH" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":478 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2842_paper.pdf", "bibtext":"@InProceedings{ Zhu_When_MICCAI2024,\n author = { Zhu, Xi and Zhang, Wei and Li, Yijie and O\u2019Donnell, Lauren J. and Zhang, Fan },\n title = { { When Diffusion MRI Meets Diffusion Model: A Novel Deep Generative Model for Diffusion MRI Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion MRI (dMRI) is an advanced imaging technique characterizing tissue microstructure and white matter structural connectivity of the human brain. The demand for high-quality dMRI data is growing, driven by the need for better resolution and improved tissue contrast. However, acquiring high-quality dMRI data is expensive and time-consuming. In this context, deep generative modeling emerges as a promising solution to enhance image quality while minimizing acquisition costs and scanning time. In this study, we propose a novel generative approach to perform dMRI generation using deep diffusion models. It can generate high dimension (4D) and high resolution data preserving the gradients information and brain structure. We demonstrated our method through an image mapping task aimed at enhancing the quality of dMRI images from 3T to 7T. Our approach demonstrates highly enhanced performance in generating dMRI images when compared to the current state-of-the-art (SOTA) methods. 
This achievement underscores a substantial progression in enhancing dMRI quality, highlighting the potential of our novel generative approach to revolutionize dMRI imaging standards.", "title":"When Diffusion MRI Meets Diffusion Model: A Novel Deep Generative Model for Diffusion MRI Generation", "authors":[ "Zhu, Xi", "Zhang, Wei", "Li, Yijie", "O\u2019Donnell, Lauren J.", "Zhang, Fan" ], "id":"Conference", "arxiv_id":"2408.12897", "GitHub":[ "https:\/\/github.com\/XiZhu-UE\/Diffusion-model-meet-dMRI.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":479 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2481_paper.pdf", "bibtext":"@InProceedings{ Ngu_Towards_MICCAI2024,\n author = { Nguyen, Anh Tien and Vuong, Trinh Thi Le and Kwak, Jin Tae },\n title = { { Towards a text-based quantitative and explainable histopathology image analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recently, vision-language pre-trained models have emerged in computational pathology. Previous works generally focused on the alignment of image-text pairs via the contrastive pre-training paradigm. Such pre-trained models have been applied to pathology image classification in zero-shot learning or transfer learning fashion. Herein, we hypothesize that the pre-trained vision-language models can be utilized for quantitative histopathology image analysis through a simple image-to-text retrieval. To this end, we propose a Text-based Quantitative and Explainable histopathology image analysis, which we call TQx. Given a set of histopathology images, we adopt a pre-trained vision-language model to retrieve a word-of-interest pool. The retrieved words are then used to quantify the histopathology images and generate understandable feature embeddings due to the direct mapping to the text description. To evaluate the proposed method, the text-based embeddings of four histopathology image datasets are utilized to perform clustering and classification tasks. 
The results demonstrate that TQx is able to quantify and analyze histopathology images that are comparable to the prevalent visual models in computational pathology.", "title":"Towards a text-based quantitative and explainable histopathology image analysis", "authors":[ "Nguyen, Anh Tien", "Vuong, Trinh Thi Le", "Kwak, Jin Tae" ], "id":"Conference", "arxiv_id":"2407.07360", "GitHub":[ "https:\/\/github.com\/anhtienng\/TQx" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":480 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1493_paper.pdf", "bibtext":"@InProceedings{ Yu_CLIPDR_MICCAI2024,\n author = { Yu, Qinkai and Xie, Jianyang and Nguyen, Anh and Zhao, He and Zhang, Jiong and Fu, Huazhu and Zhao, Yitian and Zheng, Yalin and Meng, Yanda },\n title = { { CLIP-DR: Textual Knowledge-Guided Diabetic Retinopathy Grading with Ranking-aware Prompting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diabetic retinopathy (DR) is a complication of diabetes and usually takes decades to reach sight-threatening levels. Accurate and robust detection of DR severity is critical for the timely management and treatment of diabetes. However, most current DR grading methods suffer from insufficient robustness to data variability (e.g. colour fundus images), posing a significant difficulty for accurate and robust grading. In this work, we propose a novel DR grading framework CLIP-DR based on three observations: 1) Recent pre-trained visual language models, such as CLIP, showcase a notable capacity for generalisation across various downstream tasks, serving as effective baseline models. 2) The grading of image-text pairs for DR often adheres to a discernible natural sequence, yet most existing DR grading methods have primarily overlooked this aspect. 3) A long-tailed distribution among DR severity levels complicates the grading process. This work proposes a novel ranking-aware prompting strategy to help the CLIP model exploit the ordinal information. Specifically, we sequentially design learnable prompts between neighbouring text-image pairs in two different ranking directions. Additionally, we introduce a Similarity Matrix Smooth module into the structure of CLIP to balance the class distribution. Finally, we perform extensive comparisons with several state-of-the-art methods on the GDRBench benchmark, demonstrating our CLIP-DR\u2019s robustness and superior performance. 
The implementation code is available at https:\/\/github.com\/Qinkaiyu\/CLIP-DR.", "title":"CLIP-DR: Textual Knowledge-Guided Diabetic Retinopathy Grading with Ranking-aware Prompting", "authors":[ "Yu, Qinkai", "Xie, Jianyang", "Nguyen, Anh", "Zhao, He", "Zhang, Jiong", "Fu, Huazhu", "Zhao, Yitian", "Zheng, Yalin", "Meng, Yanda" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Qinkaiyu\/CLIP-DR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":481 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1343_paper.pdf", "bibtext":"@InProceedings{ Ash_DMASTER_MICCAI2024,\n author = { Ashraf, Tajamul and Rangarajan, Krithika and Gambhir, Mohit and Gauba, Richa and Arora, Chetan },\n title = { { D-MASTER: Mask Annealed Transformer for Unsupervised Domain Adaptation in Breast Cancer Detection from Mammograms } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"We focus on the problem of Unsupervised Domain Adaptation (UDA) for breast cancer detection from mammograms (BCDM). Recent advancements have shown that masked image modeling serves as a robust pretext task for UDA. However, when applied to cross-domain BCDM, these techniques struggle with breast abnormalities such as masses, asymmetries, and micro-calcifications, in part due to the typically much smaller size of the region of interest in comparison to natural images. This often results in more false positives per image (FPI) and significant noise in pseudo-labels typically used to bootstrap such techniques. Recognizing these challenges, we introduce a transformer-based Domain-invariant Mask Annealed Student Teacher autoencoder (D-MASTER) framework. D-MASTER adaptively masks and reconstructs multi-scale feature maps, enhancing the model\u2019s ability to capture reliable target domain features. D-MASTER also includes adaptive confidence refinement to filter pseudo-labels, ensuring only high-quality detections are considered. We also provide a bounding box annotated subset of 1000 mammograms from the RSNA Breast Screening Dataset (referred to as RSNA-BSD1K) to support further research in BCDM. We evaluate D-MASTER on multiple BCDM datasets acquired from diverse domains. Experimental results show a significant improvement of 9% and 13% in sensitivity at 0.3 FPI over state-of-the-art UDA techniques on publicly available benchmark INBreast and DDSM datasets respectively. We also report an improvement of 11% and 17% on In-house and RSNA-BSD1K datasets respectively. 
The source code, pre-trained D-MASTER model, along with RSNA-BSD1K dataset annotations is available at https:\/\/dmaster-iitd.github.io\/webpage.", "title":"D-MASTER: Mask Annealed Transformer for Unsupervised Domain Adaptation in Breast Cancer Detection from Mammograms", "authors":[ "Ashraf, Tajamul", "Rangarajan, Krithika", "Gambhir, Mohit", "Gauba, Richa", "Arora, Chetan" ], "id":"Conference", "arxiv_id":"2407.06585", "GitHub":[ "https:\/\/github.com\/Tajamul21\/D-MASTER" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":482 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2968_paper.pdf", "bibtext":"@InProceedings{ Du_Prompting_MICCAI2024,\n author = { Du, Chenlin and Chen, Xiaoxuan and Wang, Jingyi and Wang, Junjie and Li, Zhongsen and Zhang, Zongjiu and Lao, Qicheng },\n title = { { Prompting Vision-Language Models for Dental Notation Aware Abnormality Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"The large pretrained vision-language models (VLMs) have demonstrated remarkable data efficiency when transferred to the medical domain. However, the successful transfer hinges on the development of effective prompting strategies. Despite progress in this area, the application of VLMs to dentistry, a field characterized by complex, multi-level dental abnormalities and subtle features associated with minor dental issues, remains uncharted territory. To address this, we propose a novel approach for detecting dental abnormalities by prompting VLMs, leveraging the symmetrical structure of the oral cavity and guided by the dental notation system. Our framework consists of two main components: dental notation-aware tooth identification and multi-level dental abnormality detection. Initially, we prompt VLMs with tooth notations for enumerating each tooth to aid subsequent detection. We then initiate a multi-level detection of dental abnormalities with quadrant and tooth codes, prompting global abnormalities across the entire image and local abnormalities on the matched teeth. Our method harmonizes subtle features with global information for local-level abnormality detection. Extensive experiments on the re-annotated DETNEX dataset demonstrate that our proposed framework significantly improves performance by at least 4.3% mAP and 10.8% AP50 compared to state-of-the-art methods. 
Code and annotations will be released on https:\/\/github.com\/CDchenlin\/DentalVLM.", "title":"Prompting Vision-Language Models for Dental Notation Aware Abnormality Detection", "authors":[ "Du, Chenlin", "Chen, Xiaoxuan", "Wang, Jingyi", "Wang, Junjie", "Li, Zhongsen", "Zhang, Zongjiu", "Lao, Qicheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CDchenlin\/DentalVLM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":483 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2317_paper.pdf", "bibtext":"@InProceedings{ Wan_Crossmodal_MICCAI2024,\n author = { Wang, Xiaofei and Huang, Xingxu and Price, Stephen and Li, Chao },\n title = { { Cross-modal Diffusion Modelling for Super-resolved Spatial Transcriptomics } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"The recent advancement of spatial transcriptomics (ST) allows to characterize spatial gene expression within tissue for discovery research. However, current ST platforms suffer from low resolution, hindering in-depth understanding of spatial gene expression. Super-resolution approaches promise to enhance ST maps by integrating histology images with gene expressions of profiled tissue spots. However, current super-resolution methods are limited by restoration uncertainty and mode collapse. Although diffusion models have shown promise in capturing complex interactions between multi-modal conditions, it remains a challenge to integrate histology images and gene expression for super-resolved ST maps. This paper proposes a cross-modal conditional diffusion model for super-resolving ST maps with the guidance of histology images. Specifically, we design a multi-modal disentangling network with cross-modal adaptive modulation to utilize complementary information from histology images and spatial gene expression. Moreover, we propose a dynamic cross-attention modelling strategy to extract hierarchical cell-to-tissue information from histology images. Lastly, we propose a co-expression-based gene-correlation graph network to model the co-expression relationship of multiple genes. 
Experiments show that our method outperforms other state-of-the-art methods in ST super-resolution on three public datasets.", "title":"Cross-modal Diffusion Modelling for Super-resolved Spatial Transcriptomics", "authors":[ "Wang, Xiaofei", "Huang, Xingxu", "Price, Stephen", "Li, Chao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/XiaofeiWang2018\/Diffusion-ST" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":484 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0148_paper.pdf", "bibtext":"@InProceedings{ Zha_AMultiInformation_MICCAI2024,\n author = { Zhang, Jianqiao and Xiong, Hao and Jin, Qiangguo and Feng, Tian and Ma, Jiquan and Xuan, Ping and Cheng, Peng and Ning, Zhiyuan and Ning, Zhiyu and Li, Changyang and Wang, Linlin and Cui, Hui },\n title = { { A Multi-Information Dual-Layer Cross-Attention Model for Esophageal Fistula Prognosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Esophageal fistula (EF) is a critical and life-threatening complication following radiotherapy treatment for esophageal cancer (EC). Albeit tabular clinical data contains other clinically valuable information, it is inherently different from CT images and the heterogeneity among them may impede the effective fusion of multi-modal data and thus degrade the performance of deep learning methods. However, current methodologies do not explicitly address this limitation. To tackle this gap, we present an adaptive multi-information dual-layer cross-attention (MDC) model using both CT images and tabular clinical data for early-stage EF detection before radiotherapy. Our MDC model comprises a clinical data encoder, an adaptive 3D Trans-CNN image encoder, and a dual-layer cross-attention (DualCrossAtt) module. The Image Encoder utilizes both CNN and transformer to extract multi-level local and global features, followed by global depth-wise convolution to remove the redundancy from these features for robust adaptive fusion. To mitigate the heterogeneity among multi-modal features and enhance fusion effectiveness, our DualCrossAtt applies the first layer of a cross-attention mechanism to perform alignment between the features of clinical data and images, generating commonly attended features to the second-layer cross-attention that models the global relationship among multi-modal features for prediction. Furthermore, we introduce a contrastive learning-enhanced hybrid loss function to further boost performance. 
Comparative evaluations against eight state-of-the-art multi-modality predictive models demonstrate the superiority of our method in EF prediction, with potential to assist personalized stratification and precision EC treatment planning.", "title":"A Multi-Information Dual-Layer Cross-Attention Model for Esophageal Fistula Prognosis", "authors":[ "Zhang, Jianqiao", "Xiong, Hao", "Jin, Qiangguo", "Feng, Tian", "Ma, Jiquan", "Xuan, Ping", "Cheng, Peng", "Ning, Zhiyuan", "Ning, Zhiyu", "Li, Changyang", "Wang, Linlin", "Cui, Hui" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":485 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1091_paper.pdf", "bibtext":"@InProceedings{ Guo_Stochastic_MICCAI2024,\n author = { Guo, Yixiao and Pei, Yuru and Chen, Si and Zhou, Zhi-bo and Xu, Tianmin and Zha, Hongbin },\n title = { { Stochastic Anomaly Simulation for Maxilla Completion from Cone-Beam Computed Tomography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated alveolar cleft defect restoration from cone beam computed tomography (CBCT) remains a challenging task, considering large morphological variations due to inter-subject abnormal maxilla development processes and a small cohort of clinical data. Existing works relied on rigid or deformable registration to borrow bony tissues from an unaffected side or a template for bony tissue filling. However, they lack harmony with the surrounding irregular maxilla structures and are limited when faced with bilateral defects. In this paper, we present a stochastic anomaly simulation algorithm for defected CBCT generation, combating limited clinical data and burdensome volumetric image annotation. By respecting the facial fusion process, the proposed anomaly simulation algorithm enables plausible data generation and relieves gaps from clinical data. We propose a weakly supervised volumetric inpainting framework for cleft defect restoration and maxilla completion, taking advantage of anomaly simulation-based data generation and the recent success of deep image inpainting techniques. 
Extensive experimental results demonstrate that our approach effectively restores defected CBCTs with performance gains over state-of-the-art methods.", "title":"Stochastic Anomaly Simulation for Maxilla Completion from Cone-Beam Computed Tomography", "authors":[ "Guo, Yixiao", "Pei, Yuru", "Chen, Si", "Zhou, Zhi-bo", "Xu, Tianmin", "Zha, Hongbin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Code-11342\/SAS-Restorer.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":486 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2909_paper.pdf", "bibtext":"@InProceedings{ Xie_VDPF_MICCAI2024,\n author = { Xie, Xiaotong and Ye, Yufeng and Yang, Tingting and Huang, Bin and Huang, Bingsheng and Huang, Yi },\n title = { { VDPF: Enhancing DVT Staging Performance Using a Global-Local Feature Fusion Network } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep Vein Thrombosis (DVT) presents a high incidence rate and serious health risks. Therefore, accurate staging is essential for formulating effective treatment plans and enhancing prognosis. Recent studies have shown the effectiveness of Black-blood Magnetic Resonance Thrombus Imaging (BTI) in differentiating thrombus stages without necessitating contrast agents. However, the accuracy of clinical DVT staging is still limited by the experience and subjective assessments of radiologists, underscoring the importance of implementing Computer-aided Diagnosis (CAD) systems for objective and precise thrombus staging. Given the small size of thrombi and their high similarity in signal intensity and shape to surrounding tissues, precise staging using CAD technology poses a significant challenge. To address this, we have developed an innovative classification framework that employs a Global-Local Feature Fusion Module (GLFM) for the effective integration of global imaging and lesion-focused local imaging. Within the GLFM, a cross-attention module is designed to capture relevant global features information based on local features. Additionally, the Feature Fusion Focus Network (FFFN) module within the GLFM facilitates the integration of features across various dimensions. The synergy between these modules ensures an effective fusion of local and global features within the GLFM framework. Experimental evidence confirms the superior performance of our proposed GLFM in feature fusion, demonstrating a significant advantage over existing methods in the task of DVT staging. 
The code is available at https:\/\/github.com\/xiextong\/VDPF.", "title":"VDPF: Enhancing DVT Staging Performance Using a Global-Local Feature Fusion Network", "authors":[ "Xie, Xiaotong", "Ye, Yufeng", "Yang, Tingting", "Huang, Bin", "Huang, Bingsheng", "Huang, Yi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xiextong\/VDPF" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":487 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1321_paper.pdf", "bibtext":"@InProceedings{ Ten_KnowledgeGuided_MICCAI2024,\n author = { Teng, Lin and Zhao, Zihao and Huang, Jiawei and Cao, Zehong and Meng, Runqi and Shi, Feng and Shen, Dinggang },\n title = { { Knowledge-Guided Prompt Learning for Lifespan Brain MR Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automatic and accurate segmentation of brain MR images throughout the human lifespan into tissue and structure is crucial for understanding brain development and diagnosing diseases. However, challenges arise from the intricate variations in brain appearance due to rapid early brain development, aging, and disorders, compounded by the limited availability of manually labeled datasets. In response, we present a two-step segmentation framework employing Knowledge-Guided Prompt Learning (KGPL) for brain MRI. Specifically, we first pre-train segmentation models on large-scale datasets with sub-optimal labels, followed by the incorporation of knowledge-driven embeddings learned from image-text alignment into the models. The introduction of knowledge-wise prompts captures semantic relationships between anatomical variability and biological processes, enabling models to learn structural feature embeddings across diverse age groups. Experimental findings demonstrate the superiority and robustness of our proposed method, particularly noticeable when employing Swin UNETR as the backbone. Our approach achieves average DSC values of 95.17% and 94.19% for brain tissue and structure segmentation, respectively. 
Our code is available at https:\/\/github.com\/TL9792\/KGPL.", "title":"Knowledge-Guided Prompt Learning for Lifespan Brain MR Image Segmentation", "authors":[ "Teng, Lin", "Zhao, Zihao", "Huang, Jiawei", "Cao, Zehong", "Meng, Runqi", "Shi, Feng", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"2407.21328", "GitHub":[ "https:\/\/github.com\/TL9792\/KGPL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":488 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2162_paper.pdf", "bibtext":"@InProceedings{ Cho_Understanding_MICCAI2024,\n author = { Chow, Chiyuen and Dan, Tingting and Styner, Martin and Wu, Guorong },\n title = { { Understanding Brain Dynamics Through Neural Koopman Operator with Structure-Function Coupling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The fundamental question in neuroscience is to understand the working mechanism of how anatomical structure supports brain function and how remarkable functional fluctuations give rise to ubiquitous behaviors. We formulate this inverse problem in the realm of system identification, where we use a geometric scattering transform (GST) to model the structure-function coupling and a neural Koopman operator to uncover the dynamic mechanism of the underlying complex system. First, GST is used to construct a collection of measurements by projecting the proxy signal of brain activity into a neural manifold constrained by the geometry of wiring patterns in the brain. Then, we seek to find a Koopman operator to elucidate the complex relationship between partial observations and behavior outcomes with a relatively simpler linear mapping, which allows us to understand functional dynamics in the cliché of control systems. Furthermore, we integrate GST and the Koopman operator into an end-to-end deep neural network, yielding an explainable model for brain dynamics with a mathematical guarantee. Through rigorous experiments conducted on the Human Connectome Project-Aging (HCP-A) dataset, our method demonstrates state-of-the-art performance in cognitive task classification, surpassing existing benchmarks. 
More importantly, our method shows great potential in uncovering novel insights into brain dynamics using a machine learning approach.", "title":"Understanding Brain Dynamics Through Neural Koopman Operator with Structure-Function Coupling", "authors":[ "Chow, Chiyuen", "Dan, Tingting", "Styner, Martin", "Wu, Guorong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":489 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1737_paper.pdf", "bibtext":"@InProceedings{ Hua_TopologicalCycle_MICCAI2024,\n author = { Huang, Jinghan and Chen, Nanguang and Qiu, Anqi },\n title = { { Topological Cycle Graph Attention Network for Brain Functional Connectivity } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this study, we introduce a novel Topological Cycle Graph Attention Network (CycGAT), designed to delineate a functional backbone within brain functional graphs\u2014key pathways essential for signal transmission\u2014from non-essential, redundant connections that form cycles around this core structure. We first introduce a cycle incidence matrix that establishes an independent cycle basis within a graph, mapping its relationship with edges. We propose a cycle graph convolution that leverages a cycle adjacency matrix, derived from the cycle incidence matrix, to specifically filter edge signals in a domain of cycles. Additionally, we strengthen the representation power of the cycle graph convolution by adding an attention mechanism, which is further augmented by the introduction of edge positional encodings in cycles, to enhance the topological awareness of CycGAT. We demonstrate CycGAT\u2019s localization through simulation and its efficacy on an ABCD study\u2019s fMRI data (n=8765), comparing it with baseline models. CycGAT outperforms these models, identifying a functional backbone with significantly fewer cycles, crucial for understanding neural circuits related to general intelligence. Our code will be released once accepted.", "title":"Topological Cycle Graph Attention Network for Brain Functional Connectivity", "authors":[ "Huang, Jinghan", "Chen, Nanguang", "Qiu, Anqi" ], "id":"Conference", "arxiv_id":"2403.19149", "GitHub":[ "https:\/\/github.com\/JH-415\/CycGAT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":490 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1820_paper.pdf", "bibtext":"@InProceedings{ Als_Zoom_MICCAI2024,\n author = { Alsharid, Mohammad and Yasrab, Robail and Drukker, Lior and Papageorghiou, Aris T. and Noble, J. 
Alison },\n title = { { Zoom Pattern Signatures for Fetal Ultrasound Structures } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"During a fetal ultrasound scan, a sonographer will zoom in and zoom out as they attempt to get clearer images of the anatomical structures of interest. This paper explores how to use this zoom information which is an under-utilised piece of information that is extractable from fetal ultrasound images. We explore associating zooming patterns to specific structures. The presence of such patterns would indicate that each individual anatomical structure has a unique signature associated with it, thereby allowing for classification of fetal ultrasound clips without directly feeding the actual fetal ultrasound content into a convolutional neural network.", "title":"Zoom Pattern Signatures for Fetal Ultrasound Structures", "authors":[ "Alsharid, Mohammad", "Yasrab, Robail", "Drukker, Lior", "Papageorghiou, Aris T.", "Noble, J. Alison" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":491 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3255_paper.pdf", "bibtext":"@InProceedings{ Yan_MambaMIL_MICCAI2024,\n author = { Yang, Shu and Wang, Yihui and Chen, Hao },\n title = { { MambaMIL: Enhancing Long Sequence Modeling with Sequence Reordering in Computational Pathology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multiple Instance Learning (MIL) has emerged as a dominant paradigm to extract discriminative feature representations within Whole Slide Images (WSIs) in computational pathology. Despite driving notable progress, existing MIL approaches suffer from limitations in facilitating comprehensive and efficient interactions among instances, as well as challenges related to time-consuming computations and overfitting. In this paper, we incorporate the Selective Scan Space State Sequential Model (Mamba) in Multiple Instance Learning (MIL) for long sequence modeling with linear complexity, termed as MambaMIL. By inheriting the capability of vanilla Mamba, MambaMIL demonstrates the ability to comprehensively understand and perceive long sequences of instances. Furthermore, we propose the Sequence Reordering Mamba (SR-Mamba) aware of the order and distribution of instances, which exploits the inherent valuable information embedded within the long sequences. With the SR-Mamba as the core component, MambaMIL can effectively capture more discriminative features and mitigate the challenges associated with overfitting and high computational overhead. Extensive experiments on two public challenging tasks across nine diverse datasets demonstrate that our proposed framework performs favorably against state-of-the-art MIL methods. 
The code is released at https:\/\/github.com\/isyangshu\/MambaMIL.", "title":"MambaMIL: Enhancing Long Sequence Modeling with Sequence Reordering in Computational Pathology", "authors":[ "Yang, Shu", "Wang, Yihui", "Chen, Hao" ], "id":"Conference", "arxiv_id":"2403.06800", "GitHub":[ "https:\/\/github.com\/isyangshu\/MambaMIL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":492 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0771_paper.pdf", "bibtext":"@InProceedings{ Che_LighTDiff_MICCAI2024,\n author = { Chen, Tong and Lyu, Qingcheng and Bai, Long and Guo, Erjian and Gao, Huxin and Yang, Xiaoxiao and Ren, Hongliang and Zhou, Luping },\n title = { { LighTDiff: Surgical Endoscopic Image Low-Light Enhancement with T-Diffusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Advances in endoscopy use in surgeries face challenges like inadequate lighting. Deep learning, notably the Denoising Diffusion Probabilistic Model (DDPM), holds promise for low-light image enhancement in the medical field. However, DDPMs are computationally demanding and slow, limiting their practical medical applications. To bridge this gap, we propose a lightweight DDPM, dubbed LighTDiff. It adopts a T-shape model architecture to capture global structural information using low-resolution images and gradually recover the details in subsequent denoising steps. We further prune the model to significantly reduce the model size while retaining performance. While discarding certain downsampling operations to save parameters leads to instability and low efficiency in convergence during training, we introduce a Temporal Light Unit (TLU), a plug-and-play module, for more stable training and better performance. TLU associates time steps with denoised image features, establishing temporal dependencies of the denoising steps and improving denoising outcomes. Moreover, while recovering images using the diffusion model, potential spectral shifts were noted. We further introduce a Chroma Balancer (CB) to mitigate this issue. 
Our LighTDiff outperforms many competitive LLIE methods with exceptional computational efficiency.", "title":"LighTDiff: Surgical Endoscopic Image Low-Light Enhancement with T-Diffusion", "authors":[ "Chen, Tong", "Lyu, Qingcheng", "Bai, Long", "Guo, Erjian", "Gao, Huxin", "Yang, Xiaoxiao", "Ren, Hongliang", "Zhou, Luping" ], "id":"Conference", "arxiv_id":"2405.10550", "GitHub":[ "https:\/\/github.com\/DavisMeee\/LighTDiff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":493 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3528_paper.pdf", "bibtext":"@InProceedings{ Saa_PEMMA_MICCAI2024,\n author = { Saadi, Nada and Saeed, Numan and Yaqub, Mohammad and Nandakumar, Karthik },\n title = { { PEMMA: Parameter-Efficient Multi-Modal Adaptation for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Imaging modalities such as Computed Tomography (CT) and Positron Emission Tomography (PET) are key in cancer detection, inspiring Deep Neural Networks (DNN) models that merge these scans for tumor segmentation. When both CT and PET scans are available, it is common to combine them as two channels of the input to the segmentation model. However, this method requires both scan types during training and inference, posing a challenge due to the limited availability of PET scans, thereby sometimes limiting the process to CT scans only. Hence, there is a need to develop a flexible DNN architecture that can be trained\/updated using only CT scans but can effectively utilize PET scans when they become available. In this work, we propose a parameter-efficient multi-modal adaptation (PEMMA) framework for lightweight upgrading of a transformer-based segmentation model trained only on CT scans to also incorporate PET scans. The benefits of the proposed approach are two-fold. Firstly, we leverage the inherent modularity of the transformer architecture and perform low-rank adaptation (LoRA) of the attention weights to achieve parameter-efficient adaptation. Secondly, since the PEMMA framework attempts to minimize cross-modal entanglement, it is possible to subsequently update the combined model using only one modality, without causing catastrophic forgetting of the other modality. 
Our proposed method achieves comparable results with the performance of early fusion techniques with just 8% of the trainable parameters, especially with a remarkable +28% improvement on the average dice score on PET scans when trained on a single modality.", "title":"PEMMA: Parameter-Efficient Multi-Modal Adaptation for Medical Image Segmentation", "authors":[ "Saadi, Nada", "Saeed, Numan", "Yaqub, Mohammad", "Nandakumar, Karthik" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":494 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0897_paper.pdf", "bibtext":"@InProceedings{ Tan_HySparK_MICCAI2024,\n author = { Tang, Fenghe and Xu, Ronghao and Yao, Qingsong and Fu, Xueming and Quan, Quan and Zhu, Heqin and Liu, Zaiyi and Zhou, S. Kevin },\n title = { { HySparK: Hybrid Sparse Masking for Large Scale Medical Image Pre-Training } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"The generative self-supervised learning strategy exhibits remarkable learning representational capabilities. However, there is limited attention to end-to-end pre-training methods based on a hybrid architecture of CNN and Transformer, which can learn strong local and global representations simultaneously. To address this issue, we propose a generative pre-training strategy called Hybrid Sparse masKing (HySparK) based on masked image modeling and apply it to large-scale pre-training on medical images. First, we perform a bottom-up 3D hybrid masking strategy on the encoder to keep consistency masking. Then we utilize sparse convolution for the top CNNs and encode unmasked patches for the bottom vision Transformers. Second, we employ a simple hierarchical decoder with skip-connections to achieve dense multi-scale feature reconstruction. Third, we implement our pre-training method on a collection of multiple large-scale 3D medical imaging datasets. Extensive experiments indicate that our proposed pre-training strategy demonstrates robust transfer-ability in supervised downstream tasks and sheds light on HySparK\u2019s promising prospects. The code is available at https:\/\/github.com\/FengheTan9\/HySparK.", "title":"HySparK: Hybrid Sparse Masking for Large Scale Medical Image Pre-Training", "authors":[ "Tang, Fenghe", "Xu, Ronghao", "Yao, Qingsong", "Fu, Xueming", "Quan, Quan", "Zhu, Heqin", "Liu, Zaiyi", "Zhou, S. Kevin" ], "id":"Conference", "arxiv_id":"2408.05815", "GitHub":[ "https:\/\/github.com\/FengheTan9\/HySparK" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":495 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0728_paper.pdf", "bibtext":"@InProceedings{ Xu_Temporal_MICCAI2024,\n author = { Xu, Jingwen and Zhu, Ye and Lyu, Fei and Wong, Grace Lai-Hung and Yuen, Pong C. 
},\n title = { { Temporal Neighboring Multi-Modal Transformer with Missingness-Aware Prompt for Hepatocellular Carcinoma Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Early prediction of hepatocellular carcinoma (HCC) is necessary to facilitate an appropriate surveillance strategy and reduce cancer mortality. Incorporating CT scans and clinical time series can greatly increase the accuracy of predictive models. However, there are two challenges to effective multi-modal learning: (a) CT scans and clinical time series can be asynchronous and irregularly sampled. (b) CT scans are often missing compared with clinical time series. To tackle the above challenges, we propose a Temporal Neighboring Multi-modal Transformer with Missingness-Aware Prompt (\\textbf{TNformer-MP}) to integrate clinical time series and available CT scans for HCC prediction. Specifically, to explore the inter-modality temporal correspondence, TNformer-MP exploits a Temporal Neighboring Multimodal Tokenizer (\\textbf{TN-MT}) to fuse the CT embedding into its multiple-scale neighboring tokens from clinical time series. To mitigate the performance drop caused by missing CT modality, TNformer-MP exploits a Missingness-aware Prompt-driven Multimodal Tokenizer (\\textbf{MP-MT}) that adopts missingness-aware prompts to adjust the encoding of clinical time series tokens. Experiments conducted on a large-scale multimodal dataset of 36,353 patients show that our method achieves superior performance compared with existing methods.", "title":"Temporal Neighboring Multi-Modal Transformer with Missingness-Aware Prompt for Hepatocellular Carcinoma Prediction", "authors":[ "Xu, Jingwen", "Zhu, Ye", "Lyu, Fei", "Wong, Grace Lai-Hung", "Yuen, Pong C." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/LyapunovStability\/TNformer-MP.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":496 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2995_paper.pdf", "bibtext":"@InProceedings{ Zhe_Rethinking_MICCAI2024,\n author = { Zheng, Zixuan and Shi, Yilei and Li, Chunlei and Hu, Jingliang and Zhu, Xiao Xiang and Mou, Lichao },\n title = { { Rethinking Cell Counting Methods: Decoupling Counting and Localization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cell counting in microscopy images is vital in medicine and biology but extremely tedious and time-consuming to perform manually. While automated methods have advanced in recent years, state-of-the-art approaches tend toward increasingly complex model designs. In this paper, we propose a conceptually simple yet effective decoupled learning scheme for automated cell counting, consisting of separate counter and localizer networks. In contrast to jointly learning counting and density map estimation, we show that decoupling these objectives surprisingly improves results. 
The counter operates on intermediate feature maps rather than pixel space to leverage global context and produce count estimates, while also generating coarse density maps. The localizer then reconstructs high-resolution density maps that precisely localize individual cells, conditional on the original images and coarse density maps from the counter. Besides, to boost counting accuracy, we further introduce a global message passing module to integrate cross-region patterns. Extensive experiments on four datasets demonstrate that our approach, despite its simplicity, challenges common practice and achieves state-of-the-art performance by significant margins. Our key insight is that decoupled learning alleviates the need to learn counting on high-resolution density maps directly, allowing the model to focus on global features critical for accurate estimates. Code is available at https:\/\/github.com\/MedAITech\/DCL.", "title":"Rethinking Cell Counting Methods: Decoupling Counting and Localization", "authors":[ "Zheng, Zixuan", "Shi, Yilei", "Li, Chunlei", "Hu, Jingliang", "Zhu, Xiao Xiang", "Mou, Lichao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MedAITech\/DCL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":497 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0165_paper.pdf", "bibtext":"@InProceedings{ Han_Advancing_MICCAI2024,\n author = { Han, Woojung and Kim, Chanyoung and Ju, Dayun and Shim, Yumin and Hwang, Seong Jae },\n title = { { Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advances in text-conditioned image generation diffusion models have begun paving the way for new opportunities in modern medical domain, in particular, generating Chest X-rays (CXRs) from diagnostic reports. Nonetheless, to further drive the diffusion models to generate CXRs that faithfully reflect the complexity and diversity of real data, it has become evident that a nontrivial learning approach is needed. In light of this, we propose CXRL, a framework motivated by the potential of reinforcement learning (RL). Specifically, we integrate a policy gradient RL approach with well-designed multiple distinctive CXR-domain specific reward models. This approach guides the diffusion denoising trajectory, achieving precise CXR posture and pathological details. Here, considering the complex medical image environment, we present \u201cRL with Comparative Feedback\u201d (RLCF) for the reward mechanism, a human-like comparative evaluation that is known to be more effective and reliable in complex scenarios compared to direct evaluation. Our CXRL framework includes jointly optimizing learnable adaptive condition embeddings (ACE) and the image generator, enabling the model to produce more accurate and higher perceptual CXR quality. Our extensive evaluation of the MIMIC-CXR-JPG dataset demonstrates the effectiveness of our RL-based tuning approach. 
Consequently, our CXRL generates pathologically realistic CXRs, establishing a new standard for generating CXRs with high fidelity to real-world clinical scenarios.", "title":"Advancing Text-Driven Chest X-Ray Generation with Policy-Based Reinforcement Learning", "authors":[ "Han, Woojung", "Kim, Chanyoung", "Ju, Dayun", "Shim, Yumin", "Hwang, Seong Jae" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MICV-yonsei\/CXRL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":498 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2799_paper.pdf", "bibtext":"@InProceedings{ Bay_BiasPruner_MICCAI2024,\n author = { Bayasi, Nourhan and Fayyad, Jamil and Bissoto, Alceu and Hamarneh, Ghassan and Garbi, Rafeef },\n title = { { BiasPruner: Debiased Continual Learning for Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Continual Learning (CL) is crucial for enabling networks to dynamically adapt as they learn new tasks sequentially, accommodating new data and classes without catastrophic forgetting. Diverging from conventional perspectives on CL, our paper introduces a new perspective wherein forgetting could actually benefit sequential learning paradigm. Specifically, we present BiasPruner, a CL framework that intentionally forgets spurious correlations in the training data that could lead to shortcut learning. Utilizing a new bias score that measures the contribution of each unit in the network to learning spurious features, BiasPruner prunes those units with the highest bias scores to form a debiased subnetwork preserved for a given task. As BiasPruner learns a new task, it constructs a new debiased subnetwork, potentially incorporating units from previous subnetworks, which improves adaptation and performance on the new task. During inference, BiasPruner employs a simple task-agnostic approach to select the best debiased subnetwork for predictions. We conduct experiments on three medical datasets for skin lesion classification and chest X-RAY classification and demonstrate that BiasPruner consistently outperforms SOTA CL methods in terms of classification performance and fairness. Our code is available at: Link.", "title":"BiasPruner: Debiased Continual Learning for Medical Image Classification", "authors":[ "Bayasi, Nourhan", "Fayyad, Jamil", "Bissoto, Alceu", "Hamarneh, Ghassan", "Garbi, Rafeef" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/nourhanb\/BiasPruner" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":499 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1365_paper.pdf", "bibtext":"@InProceedings{ Lia_IterMask2_MICCAI2024,\n author = { Liang, Ziyun and Guo, Xiaoqing and Noble, J. 
Alison and Kamnitsas, Konstantinos },\n title = { { IterMask2: Iterative Unsupervised Anomaly Segmentation via Spatial and Frequency Masking for Brain Lesions in MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Unsupervised anomaly segmentation approaches to pathology segmentation train a model on images of healthy subjects, which they define as the `normal\u2019 data distribution. At inference, they aim to segment any pathologies in new images as \u2018anomalies\u2019, as they exhibit patterns that deviate from those in \u2018normal\u2019 training data. Prevailing methods follow the \u2018corrupt-and-reconstruct\u2019 paradigm. They intentionally corrupt an input image, reconstruct it to follow the learned \u2018normal\u2019 distribution, and subsequently segment anomalies based on reconstruction error. Corrupting an input image, however, inevitably leads to suboptimal reconstruction even of normal regions, causing false positives. To alleviate this, we propose a novel iterative spatial mask-refining strategy IterMask2. We iteratively mask areas of the image, reconstruct them, and update the mask based on reconstruction error. This iterative process progressively adds information about areas that are confidently normal as per the model. The increasing content guides reconstruction of nearby masked areas, improving reconstruction of normal tissue under these areas, reducing false positives. We also use high-frequency image content as an auxiliary input to provide additional structural information for masked areas. This further improves reconstruction error of normal areas in comparison to anomalous areas, facilitating segmentation of the latter. We conduct experiments on several brain lesion datasets and demonstrate the effectiveness of our method. Code will be published at: https:\/\/github.com\/ZiyunLiang\/IterMask2", "title":"IterMask2: Iterative Unsupervised Anomaly Segmentation via Spatial and Frequency Masking for Brain Lesions in MRI", "authors":[ "Liang, Ziyun", "Guo, Xiaoqing", "Noble, J. Alison", "Kamnitsas, Konstantinos" ], "id":"Conference", "arxiv_id":"2406.02422", "GitHub":[ "https:\/\/github.com\/ZiyunLiang\/IterMask2" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":500 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3284_paper.pdf", "bibtext":"@InProceedings{ Hua_TopologicalGCN_MICCAI2024,\n author = { Huang, Tianxiang and Shi, Jing and Jin, Ge and Li, Juncheng and Wang, Jun and Du, Jun and Shi, Jun },\n title = { { Topological GCN for Improving Detection of Hip Landmarks from B-Mode Ultrasound Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"The B-mode ultrasound based computer-aided diagnosis (CAD) has demonstrated its effectiveness for the diagnosis of Developmental Dysplasia of the Hip (DDH) in infants. However, due to the effect of speckle noise in ultrasound images, it is still a challenging task to accurately detect hip landmarks.
In this work, we propose a novel hip landmark detection model by integrating the Topological GCN (TGCN) with an Improved Conformer (TGCN-ICF) into a unified framework to improve detection performance. The TGCN-ICF includes two subnetworks: an Improved Conformer (ICF) subnetwork to generate heatmaps and a TGCN subnetwork to additionally refine landmark detection. This TGCN can effectively improve detection accuracy with the guidance of class labels. Moreover, a Mutual Modulation Fusion (MMF) module is developed for deeply exchanging and fusing the features extracted from the U-Net and Transformer branches in ICF. The experimental results on the real DDH dataset demonstrate that the proposed TGCN-ICF outperforms all the compared algorithms.", "title":"Topological GCN for Improving Detection of Hip Landmarks from B-Mode Ultrasound Images", "authors":[ "Huang, Tianxiang", "Shi, Jing", "Jin, Ge", "Li, Juncheng", "Wang, Jun", "Du, Jun", "Shi, Jun" ], "id":"Conference", "arxiv_id":"2408.13495", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":501 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0712_paper.pdf", "bibtext":"@InProceedings{ Zhu_SelfRegUNet_MICCAI2024,\n author = { Zhu, Wenhui and Chen, Xiwen and Qiu, Peijie and Farazi, Mohammad and Sotiras, Aristeidis and Razi, Abolfazl and Wang, Yalin },\n title = { { SelfReg-UNet: Self-Regularized UNet for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Since its introduction, UNet has been leading a variety of medical image segmentation tasks. Although numerous follow-up studies have also been dedicated to improving the performance of standard UNet, few have conducted in-depth analyses of the underlying patterns UNet learns in medical image segmentation. In this paper, we explore the patterns learned in a UNet and observe two important factors that potentially affect its performance: (i) irrelevant features learned due to asymmetric supervision; (ii) feature redundancy in the feature map. To this end, we propose to balance the supervision between encoder and decoder and reduce the redundant information in the UNet. Specifically, we use the feature map that contains the most semantic information (i.e., the last layer of the decoder) to provide additional supervision to other blocks and reduce feature redundancy by leveraging feature distillation. The proposed method can be easily integrated into existing UNet architectures in a plug-and-play fashion with negligible computational cost.
The experimental results suggest that the proposed method consistently improves the performance of standard UNets on four medical image segmentation datasets.", "title":"SelfReg-UNet: Self-Regularized UNet for Medical Image Segmentation", "authors":[ "Zhu, Wenhui", "Chen, Xiwen", "Qiu, Peijie", "Farazi, Mohammad", "Sotiras, Aristeidis", "Razi, Abolfazl", "Wang, Yalin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ChongQingNoSubway\/SelfReg-UNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":502 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1197_paper.pdf", "bibtext":"@InProceedings{ Li_Dynamic_MICCAI2024,\n author = { Li, Xiao-Xin and Zhu, Fang-Zheng and Yang, Junwei and Chen, Yong and Shen, Dinggang },\n title = { { Dynamic Hybrid Unrolled Multi-Scale Network for Accelerated MRI Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"In accelerated magnetic resonance imaging (MRI) reconstruction, the anatomy of a patient is recovered from a set of under-sampled measurements. Currently, unrolled hybrid architectures, which combine the beneficial bias of convolutions with the power of Transformers, have proven successful in solving this ill-posed inverse problem. The multi-scale strategy of the intra-cascades and that of the inter-cascades are used to decrease the high compute cost of Transformers and to rectify the spectral bias of Transformers, respectively. In this work, we propose a dynamic Hybrid Unrolled Multi-Scale Network (dHUMUS-Net) by incorporating the two multi-scale strategies. A novel Optimal Scale Estimation Network is presented to dynamically create or choose the multi-scale Transformer-based modules in all cascades of dHUMUS-Net.
Our dHUMUS-Net achieves significant improvements over the state-of-the-art methods on the publicly available fastMRI dataset.", "title":"Dynamic Hybrid Unrolled Multi-Scale Network for Accelerated MRI Reconstruction", "authors":[ "Li, Xiao-Xin", "Zhu, Fang-Zheng", "Yang, Junwei", "Chen, Yong", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":503 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2541_paper.pdf", "bibtext":"@InProceedings{ Lin_ClassBalancing_MICCAI2024,\n author = { Lin, Hongxin and Zhang, Chu and Wang, Mingyu and Huang, Bin and Shao, Jingjing and Zhang, Jinxiang and Gao, Zhenhua and Diao, Xianfen and Huang, Bingsheng },\n title = { { Class-Balancing Deep Active Learning with Auto-Feature Mixing and Minority Push-Pull Sampling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep neural networks demand large-scale labeled datasets for optimal performance, yet the cost of annotation remains high. Deep active learning (DAL) offers a promising approach to reduce annotation cost while maintaining performance. However, traditional DAL methods often fail to balance performance and computational efficiency, and overlook the challenge posed by class imbalance. To address these challenges, we propose a novel framework, named Class-Balancing Deep Active Learning (CB-DAL), comprising two key modules: auto-mode feature mixing (Auto-FM) and minority push-pull sampling (MPPS). Auto-FM identifies informative samples by simply detecting inconsistencies in predicted labels after feature mixing, while MPPS mitigates the class imbalance within the selected training pool by selecting candidates whose features are close to the minority class centroid yet distant from features of the labelled majority class. Evaluated across varying class imbalance ratios and dataset scales, CB-DAL outperforms traditional DAL methods and counterparts designed for imbalanced datasets.
Our method provides a simple yet effective solution to the class imbalance problem in DAL, with broad potential applications.", "title":"Class-Balancing Deep Active Learning with Auto-Feature Mixing and Minority Push-Pull Sampling", "authors":[ "Lin, Hongxin", "Zhang, Chu", "Wang, Mingyu", "Huang, Bin", "Shao, Jingjing", "Zhang, Jinxiang", "Gao, Zhenhua", "Diao, Xianfen", "Huang, Bingsheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":504 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4059_paper.pdf", "bibtext":"@InProceedings{ Fen_Diversified_MICCAI2024,\n author = { Feng, Xiaoyi and Zhang, Minqing and He, Mengxian and Gao, Mengdi and Wei, Hao and Yuan, Wu },\n title = { { Diversified and Structure-realistic Fundus Image Synthesis for Diabetic Retinopathy Lesion Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated diabetic retinopathy (DR) lesion segmentation aids in improving the efficiency of DR detection. However, obtaining lesion annotations for model training heavily relies on domain expertise and is a labor-intensive process. In addition to classical methods for alleviating label scarcity issues, such as self-supervised and semi-supervised learning, with the rapid development of generative models, several studies have indicated that utilizing synthetic image-mask pairs as data augmentation is promising. Due to the insufficient labeled data available to train powerful generative models, however, the synthetic fundus data suffers from two drawbacks: 1) unrealistic anatomical structures, and 2) limited lesion diversity. In this paper, we propose a novel framework to synthesize fundus images with DR lesion masks under limited labels. To increase lesion variation, we designed a learnable module to generate anatomically plausible masks as the condition, rather than directly using lesion masks from the limited dataset. To reduce the difficulty of learning intricate structures, we avoid directly generating images solely from lesion mask conditions. Instead, we developed an inpainting strategy that enables the model to generate lesions only within the mask area based on easily accessible healthy fundus images. Subjective evaluations indicate that our approach can generate more realistic fundus images with lesions compared to other generative methods.
The downstream lesion segmentation experiments demonstrate that our synthetic data resulted in the most improvement across multiple network architectures, surpassing state-of-the-art methods.", "title":"Diversified and Structure-realistic Fundus Image Synthesis for Diabetic Retinopathy Lesion Segmentation", "authors":[ "Feng, Xiaoyi", "Zhang, Minqing", "He, Mengxian", "Gao, Mengdi", "Wei, Hao", "Yuan, Wu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":505 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2001_paper.pdf", "bibtext":"@InProceedings{ Zho_Robust_MICCAI2024,\n author = { Zhou, Xiaogen and Sun, Yiyou and Deng, Min and Chu, Winnie Chiu Wing and Dou, Qi },\n title = { { Robust Semi-supervised Multimodal Medical Image Segmentation via Cross Modality Collaboration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multimodal learning leverages complementary information derived from different modalities, thereby enhancing performance in medical image segmentation. However, prevailing multimodal learning methods heavily rely on extensive well-annotated data from various modalities to achieve accurate segmentation performance. This dependence often poses a challenge in clinical settings due to limited availability of such data. Moreover, the inherent anatomical misalignment between different imaging modalities further complicates the endeavor to enhance segmentation performance. To address this problem, we propose a novel semi-supervised multimodal segmentation framework that is robust to scarce labeled data and misaligned modalities. Our framework employs a novel cross modality collaboration strategy to distill modality-independent knowledge, which is inherently associated with each modality, and integrates this information into a unified fusion layer for feature amalgamation. With a channel-wise semantic consistency loss, our framework ensures alignment of modality-independent information from a feature-wise perspective across modalities, thereby fortifying it against misalignments in multimodal scenarios. Furthermore, our framework effectively integrates contrastive consistent learning to regulate anatomical structures, facilitating anatomical-wise prediction alignment on unlabeled data in semi-supervised segmentation tasks. Our method achieves competitive performance compared to other multimodal methods across three tasks: cardiac, abdominal multi-organ, and thyroid-associated orbitopathy segmentations. 
It also demonstrates outstanding robustness in scenarios involving scarce labeled data and misaligned modalities.", "title":"Robust Semi-supervised Multimodal Medical Image Segmentation via Cross Modality Collaboration", "authors":[ "Zhou, Xiaogen", "Sun, Yiyou", "Deng, Min", "Chu, Winnie Chiu Wing", "Dou, Qi" ], "id":"Conference", "arxiv_id":"2408.07341", "GitHub":[ "https:\/\/github.com\/med-air\/CMC" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":506 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0484_paper.pdf", "bibtext":"@InProceedings{ Li_Image_MICCAI2024,\n author = { Li, Zhe and Kainz, Bernhard },\n title = { { Image Distillation for Safe Data Sharing in Histopathology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Histopathology can help clinicians make accurate diagnoses, determine disease prognosis, and plan appropriate treatment strategies. As deep learning techniques prove successful in the medical domain, the primary challenges become limited data availability and concerns about data sharing and privacy. Federated learning has addressed this challenge by training models locally and updating parameters on a server. However, issues, such as domain shift and bias, persist and impact overall performance. Dataset distillation presents an alternative approach to overcoming these challenges. It involves creating a small synthetic dataset that encapsulates essential information, which can be shared without constraints. At present, this paradigm is not practicable as current distillation approaches only generate non human readable representations and exhibit insufficient performance for downstream learning tasks. We train a latent diffusion model and construct a new distilled synthetic dataset with a small number of human readable synthetic images. Selection of maximally informative synthetic images is done via graph community analysis of the representation space. We compare downstream classification models trained on our synthetic distillation data to models trained on real data and reach performances suitable for practical application.", "title":"Image Distillation for Safe Data Sharing in Histopathology", "authors":[ "Li, Zhe", "Kainz, Bernhard" ], "id":"Conference", "arxiv_id":"2406.13536", "GitHub":[ "https:\/\/github.com\/ZheLi2020\/InfoDist" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":507 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2280_paper.pdf", "bibtext":"@InProceedings{ Ram_Ensemble_MICCAI2024,\n author = { Ramanathan, Vishwesh and Pati, Pushpak and McNeil, Matthew and Martel, Anne L. 
},\n title = { { Ensemble of Prior-guided Expert Graph Models for Survival Prediction in Digital Pathology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Survival prediction in pathology is a dynamic research field focused on identifying predictive biomarkers to enhance cancer survival models, providing valuable guidance for clinicians in treatment decisions. Graph-based methods, especially Graph Neural Networks (GNNs) leveraging rich interactions among different biological entities, have recently successfully predicted survival. However, the inherent heterogeneity among the entities within tissue slides significantly challenges the learning of GNNs. GNNs, operating with the homophily assumption, diffuse the intricate interactions among heterogeneous tissue entities in a tissue microenvironment. Further, the convoluted downstream task relevant information is not effectively exploited by graph-based methods when working with large slide-graphs. To address these challenges, we propose a novel prior-guided edge-attributed tissue-graph construction, followed by an ensemble of expert graph-attention survival models. Our method exploits diverse prognostic factors within numerous targeted tissue subgraphs of heterogeneous large slide-graphs.\nOur method achieves state-of-the-art results on four cancer types, improving overall survival prediction by 4.33% compared to the competing methods.", "title":"Ensemble of Prior-guided Expert Graph Models for Survival Prediction in Digital Pathology", "authors":[ "Ramanathan, Vishwesh", "Pati, Pushpak", "McNeil, Matthew", "Martel, Anne L." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Vishwesh4\/DGNN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":508 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2346_paper.pdf", "bibtext":"@InProceedings{ Gu_Reliable_MICCAI2024,\n author = { Gu, Ang Nan and Tsang, Michael and Vaseli, Hooman and Tsang, Teresa and Abolmaesumi, Purang },\n title = { { Reliable Multi-View Learning with Conformal Prediction for Aortic Stenosis Classification in Echocardiography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"The fundamental problem with ultrasound-guided diagnosis is that the acquired images are often 2-D cross-sections of a 3-D anatomy, potentially missing important anatomical details. This limitation leads to challenges in ultrasound echocardiography, such as poor visualization of heart valves or foreshortening of ventricles. Clinicians must interpret these images with inherent uncertainty, a nuance absent in machine learning\u2019s one-hot labels. We propose Re-Training for Uncertainty (RT4U), a data-centric method to introduce uncertainty to weakly informative inputs in the training set. This simple approach can be incorporated to existing state-of-the-art aortic stenosis classification methods to further improve their accuracy. 
When combined with conformal prediction techniques, RT4U can yield adaptively sized prediction sets which are guaranteed to contain the ground truth class to a high accuracy. \nWe validate the effectiveness of RT4U on three diverse datasets: a public (TMED-2) and a private AS dataset, along with a CIFAR-10-derived toy dataset. Results show improvement on all the datasets. Our source code is publicly available at: https:\/\/github.com\/an-michaelg\/RT4U", "title":"Reliable Multi-View Learning with Conformal Prediction for Aortic Stenosis Classification in Echocardiography", "authors":[ "Gu, Ang Nan", "Tsang, Michael", "Vaseli, Hooman", "Tsang, Teresa", "Abolmaesumi, Purang" ], "id":"Conference", "arxiv_id":"2409.09680", "GitHub":[ "https:\/\/github.com\/an-michaelg\/RT4U" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":509 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1276_paper.pdf", "bibtext":"@InProceedings{ Rob_DRIM_MICCAI2024,\n author = { Robinet, Lucas and Berjaoui, Ahmad and Kheil, Ziad and Cohen-Jonathan Moyal, Elizabeth },\n title = { { DRIM: Learning Disentangled Representations from Incomplete Multimodal Healthcare Data } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Real-life medical data is often multimodal and incomplete, fueling the growing need for advanced deep learning models capable of integrating them efficiently. \nThe use of diverse modalities, including histopathology slides, MRI, and genetic data, offers unprecedented opportunities to improve prognosis prediction and to unveil new treatment pathways. \nContrastive learning, widely used for deriving representations from paired data in multimodal tasks, assumes that different views contain the same task-relevant information and leverages only shared information. \nThis assumption becomes restrictive when handling medical data since each modality also harbors specific knowledge relevant to downstream tasks.\nWe introduce DRIM, a new multimodal method for capturing these shared and unique representations, despite data sparsity. \nMore specifically, given a set of modalities, we aim to encode a representation for each one that can be divided into two components: one encapsulating patient-related information common across modalities and the other, encapsulating modality-specific details. \nThis is achieved by increasing the shared information among different patient modalities while minimizing the overlap between shared and unique components within each modality.\nOur method outperforms state-of-the-art algorithms on glioma patients survival prediction tasks, while being robust to missing modalities. 
To promote reproducibility, the code is made publicly available at https:\/\/github.com\/Lucas-rbnt\/DRIM.", "title":"DRIM: Learning Disentangled Representations from Incomplete Multimodal Healthcare Data", "authors":[ "Robinet, Lucas", "Berjaoui, Ahmad", "Kheil, Ziad", "Cohen-Jonathan Moyal, Elizabeth" ], "id":"Conference", "arxiv_id":"2409.17055", "GitHub":[ "https:\/\/github.com\/Lucas-rbnt\/DRIM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":510 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3437_paper.pdf", "bibtext":"@InProceedings{ Hua_Resolving_MICCAI2024,\n author = { Huang, Yuliang and Eiben, Bjoern and Thielemans, Kris and McClelland, Jamie R. },\n title = { { Resolving Variable Respiratory Motion From Unsorted 4D Computed Tomography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"4D Computed Tomography (4DCT) is widely used for many clinical applications such as radiotherapy treatment planning, PET and ventilation imaging. However, common 4DCT methods reconstruct multiple breath cycles into a single, arbitrary breath cycle which can lead to various artefacts, impacting the downstream clinical applications. Surrogate driven motion models can estimate continuous variable motion across multiple cycles based on CT segments `unsorted\u2019 from 4DCT, but it requires respiration surrogate signals with strong correlation to the internal motion, which are not always available. The method proposed in this study eliminates such dependency by adapting the hyper-gradient method to the optimization of surrogate signals as hyper-parameters, while achieving better or comparable performance, as demonstrated on digital phantom simulations and real patient data. Our method produces a high-quality motion-compensated image together with estimates of the motion, including breath-to-breath variability, throughout the image acquisition. Our method has the potential to improve downstream clinical applications, and also enables retrospective analysis of open access 4DCT dataset where no respiration signals are stored. Code is available at https:\/\/github.com\/Yuliang-Huang\/4DCT-irregular-motion.", "title":"Resolving Variable Respiratory Motion From Unsorted 4D Computed Tomography", "authors":[ "Huang, Yuliang", "Eiben, Bjoern", "Thielemans, Kris", "McClelland, Jamie R." 
], "id":"Conference", "arxiv_id":"2407.00665", "GitHub":[ "https:\/\/github.com\/Yuliang-Huang\/4DCT-irregular-motion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":511 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1378_paper.pdf", "bibtext":"@InProceedings{ Li_Epileptic_MICCAI2024,\n author = { Li, Zhuoyi and Li, Wenjun and Zhu, Ning and Han, Junwei and Liu, Tianming and Chen, Beibei and Yan, Zhiqiang and Zhang, Tuo },\n title = { { Epileptic Seizure Detection in SEEG Signals using a Unified Multi-scale Temporal-Spatial-Spectral Transformer Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"High-performance methods for automated detection of epileptic stereo-electroencephalography (SEEG) have important clinical research implications, improving the diagnostic efficiency and reducing physician burden. However, few studies have been able to consider the process of seizure propagation, thus failing to fully capture the deep representations and variations of SEEG in the temporal, spatial, and spectral domains. In this paper, we construct a novel long-term SEEG seizure dataset (LTSZ dataset), and propose channel embedding temporal-spatial-spectral transformer (CE-TSS-Transformer) framework. Firstly, we design channel embedding module to reduce feature dimensions and adaptively construct optimal representation for subsequent analysis. Secondly, we integrate unified multi-scale temporal-spatial-spectral analysis to capture multi-level, multi-domain deep features. Finally, we utilize the transformer encoder to learn the global relevance of features, enhancing the network\u2019s ability to express SEEG features. Experimental results demonstrate state-of-the-art detection performance on the LTSZ dataset, achieving sensitivity, specificity, and accuracy of 99.48%, 99.80%, and 99.48%, respectively. Furthermore, we validate the scalability of the proposed framework on two public datasets of different signal sources, demonstrating the power of the CE-TSS-Transformer framework for capturing diverse temporal-spatial-spectral patterns in seizure detection. 
The code is available at https:\/\/github.com\/lizhuoyi-eve\/CE-TSS-Transformer.", "title":"Epileptic Seizure Detection in SEEG Signals using a Unified Multi-scale Temporal-Spatial-Spectral Transformer Model", "authors":[ "Li, Zhuoyi", "Li, Wenjun", "Zhu, Ning", "Han, Junwei", "Liu, Tianming", "Chen, Beibei", "Yan, Zhiqiang", "Zhang, Tuo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/lizhuoyi-eve\/CE-TSS-Transformer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":512 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1453_paper.pdf", "bibtext":"@InProceedings{ Zho_HeartBeat_MICCAI2024,\n author = { Zhou, Xinrui and Huang, Yuhao and Xue, Wufeng and Dou, Haoran and Cheng, Jun and Zhou, Han and Ni, Dong },\n title = { { HeartBeat: Towards Controllable Echocardiography Video Synthesis with Multimodal Conditions-Guided Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Echocardiography (ECHO) video is widely used for cardiac examination. In clinical practice, this procedure heavily relies on operator experience, which requires years of training and may benefit from the assistance of deep learning-based systems for enhanced accuracy and efficiency. However, it is challenging since acquiring sufficient customized data (e.g., abnormal cases) for novice training and deep model development is clinically unrealistic. Hence, controllable ECHO video synthesis is highly desirable. In this paper, we propose a novel diffusion-based framework named HeartBeat towards controllable and high-fidelity ECHO video synthesis. Our highlight is three-fold. First, HeartBeat serves as a unified framework that enables perceiving multimodal conditions simultaneously to guide controllable generation. Second, we factorize the multimodal conditions into local and global ones, with two insertion strategies that separately provide fine- and coarse-grained controls in a composable and flexible manner. In this way, users can synthesize ECHO videos that conform to their mental imagery by combining multimodal control signals. Third, we propose to decouple the visual concepts and temporal dynamics learning using a two-stage training scheme for simplifying the model training. Notably, HeartBeat can easily generalize to mask-guided cardiac MRI synthesis in a few shots, showcasing its scalability to broader applications.
Extensive experiments on two public datasets show the efficacy of the proposed HeartBeat.", "title":"HeartBeat: Towards Controllable Echocardiography Video Synthesis with Multimodal Conditions-Guided Diffusion Models", "authors":[ "Zhou, Xinrui", "Huang, Yuhao", "Xue, Wufeng", "Dou, Haoran", "Cheng, Jun", "Zhou, Han", "Ni, Dong" ], "id":"Conference", "arxiv_id":"2406.14098", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":513 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1410_paper.pdf", "bibtext":"@InProceedings{ Wan_Contextguided_MICCAI2024,\n author = { Wan, Kaiwen and Wang, Bomin and Wu, Fuping and Gong, Haiyu and Zhuang, Xiahai },\n title = { { Context-guided Continual Reinforcement Learning for Landmark Detection with Incomplete Data } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Existing landmark detection methods are primarily designed for centralized learning scenarios where all training data and labels are complete and available throughout the entire training phase. In real-world scenarios, training data may be collected sequentially, covering only part of the region of interest or providing incomplete landmark labels.\n In this work, we propose a novel continual reinforcement learning framework to tackle this complex situation in landmark detection.\nTo handle the increasing number of landmark targets during training, we introduce a Q-learning network that takes both observations and prompts as input. The prompts are stored in a buffer and utilized to guide the prediction for each landmark, enabling our method to adapt to the intricacies of the data collection process.\nWe validate our approach on two datasets: the RSNA-PBA dataset, representing scenarios with complete images and incomplete labels, and the WB-DXA dataset, representing situations where both images and labels are incomplete. The results demonstrate the effectiveness of the proposed method in landmark detection tasks with complex data structures. The source code will be available from https:\/\/github.com\/kevinwolcano\/CgCRL.", "title":"Context-guided Continual Reinforcement Learning for Landmark Detection with Incomplete Data", "authors":[ "Wan, Kaiwen", "Wang, Bomin", "Wu, Fuping", "Gong, Haiyu", "Zhuang, Xiahai" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":514 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2090_paper.pdf", "bibtext":"@InProceedings{ Lia_3DSAutoMed_MICCAI2024,\n author = { Liang, Junjie and Cao, Peng and Yang, Wenju and Yang, Jinzhu and Zaiane, Osmar R. 
},\n title = { { 3D-SAutoMed: Automatic Segment Anything Model for 3D Medical Image Segmentation from Local-Global Perspective } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"3D medical image segmentation is critical for clinical diagnosis and treatment planning. Recently, owing to its powerful generalization, the foundational segmentation model SAM has been widely applied to medical images. However, the existing SAM variants still have many limitations, including the lack of 3D awareness and automatic prompting. To address these limitations, we present a novel SAM-based segmentation framework for 3D medical images, namely 3D-SAutoMed. We propose the Inter- and Intra-slice Attention and the Historical slice Information Sharing strategy to share local and global information, respectively, so as to make SAM 3D-aware. Meanwhile, we propose a Box Prompt Generator to automatically generate prompt embeddings, enabling full automation of SAM. Our results demonstrate that 3D-SAutoMed outperforms advanced universal methods and SAM variants on both metrics across the BTCV, CHAOS and SegTHOR datasets. In particular, a large improvement in HD score is achieved, e.g., 44% and 20.7% over the best result among the other SAM variants on the BTCV and SegTHOR datasets, respectively.", "title":"3D-SAutoMed: Automatic Segment Anything Model for 3D Medical Image Segmentation from Local-Global Perspective", "authors":[ "Liang, Junjie", "Cao, Peng", "Yang, Wenju", "Yang, Jinzhu", "Zaiane, Osmar R." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":515 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2888_paper.pdf", "bibtext":"@InProceedings{ Yan_Brain_MICCAI2024,\n author = { Yang, Li and He, Zhibin and Zhong, Tianyang and Li, Changhe and Zhu, Dajiang and Han, Junwei and Liu, Tianming and Zhang, Tuo },\n title = { { Brain Cortical Functional Gradients Predict Cortical Folding Patterns via Attention Mesh Convolution } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"A close relation between brain function and cortical folding has been demonstrated by macro-\/micro- imaging, computational modeling, and genetics. Since gyri and sulci, two basic anatomical building blocks of cortical folding patterns, were suggested to bear different functional roles, a precise mapping from brain function to gyro-sulcal patterns can provide profound insights into both biological and artificial neural networks. However, a generic theory and an effective computational model are still lacking, due to the highly nonlinear relation between function and folding, huge inter-individual variability, and the sophisticated, mosaic-like distribution of brain function regions\/networks, whose spatial patterning has not been considered.
To this end, as a preliminary effort, we adopted brain functional gradients derived from resting-state fMRI to embed the \u201cgradual\u201d change of functional connectivity patterns, and developed a novel attention mesh convolution model to predict cortical gyro-sulcal segmentation maps on individual brains. The convolution on mesh considers the spatial organization of functional gradients and folding patterns on a cortical sheet, and the newly designed channel attention block enhances the interpretability of the contribution of different functional gradients to cortical folding prediction. Experiments show that our model outperforms other state-of-the-art models in prediction performance. In addition, we found that the dominant functional gradients contribute less to folding prediction. On the activation maps of the last layer, some well-studied cortical landmarks are found on the borders of, rather than within, the highly activated regions. These results and findings suggest that a specifically designed artificial neural network can improve the precision of the mapping between brain functions and cortical folding patterns, and can provide valuable insight into the brain anatomy-function relation for neuroscience.", "title":"Brain Cortical Functional Gradients Predict Cortical Folding Patterns via Attention Mesh Convolution", "authors":[ "Yang, Li", "He, Zhibin", "Zhong, Tianyang", "Li, Changhe", "Zhu, Dajiang", "Han, Junwei", "Liu, Tianming", "Zhang, Tuo" ], "id":"Conference", "arxiv_id":"2205.10605", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":516 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1554_paper.pdf", "bibtext":"@InProceedings{ Wan_Ordinal_MICCAI2024,\n author = { Wang, Xin and Tan, Tao and Gao, Yuan and Marcus, Eric and Han, Luyi and Portaluri, Antonio and Zhang, Tianyu and Lu, Chunyao and Liang, Xinglong and Beets-Tan, Regina and Teuwen, Jonas and Mann, Ritse },\n title = { { Ordinal Learning: Longitudinal Attention Alignment Model for Predicting Time to Future Breast Cancer Events from Mammograms } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Precision breast cancer (BC) risk assessment is crucial for developing individualized screening and prevention. Despite the promising potential of recent mammogram (MG) based deep learning models in predicting BC risk, they mostly overlook the \u201ctime-to-future-event\u201d ordering among patients and exhibit limited explorations into how they track history changes in breast tissue, thereby limiting their clinical application. In this work, we propose a novel method, named OA-BreaCR, to precisely model the ordinal relationship of the time to and between BC events while incorporating longitudinal breast tissue changes in a more explainable manner. We validate our method on public EMBED and in-house datasets, comparing with existing BC risk prediction and time prediction methods. Our ordinal learning method OA-BreaCR outperforms existing methods in both BC risk and time-to-future-event prediction tasks.
Additionally, ordinal heatmap visualizations show the model\u2019s attention over time. Our findings underscore the importance of interpretable and precise risk assessment for enhancing BC screening and prevention efforts. The code will be accessible to the public.", "title":"Ordinal Learning: Longitudinal Attention Alignment Model for Predicting Time to Future Breast Cancer Events from Mammograms", "authors":[ "Wang, Xin", "Tan, Tao", "Gao, Yuan", "Marcus, Eric", "Han, Luyi", "Portaluri, Antonio", "Zhang, Tianyu", "Lu, Chunyao", "Liang, Xinglong", "Beets-Tan, Regina", "Teuwen, Jonas", "Mann, Ritse" ], "id":"Conference", "arxiv_id":"2409.06887", "GitHub":[ "https:\/\/github.com\/xinwangxinwang\/OA-BreaCR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":517 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3076_paper.pdf", "bibtext":"@InProceedings{ Van_Privacy_MICCAI2024,\n author = { Van der Goten, Lennart A. and Smith, Kevin },\n title = { { Privacy Protection in MRI Scans Using 3D Masked Autoencoders } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"MRI scans provide valuable medical information, however they also contain sensitive and personally identifiable information that needs to be protected. Whereas MRI metadata is easily sanitized, MRI image data is a privacy risk because it contains information to render highly-realistic 3D visualizations of a patient\u2019s head, enabling malicious actors to possibly identify the subject by cross-referencing a database. Data anonymization and de-identification is concerned with ensuring the privacy and confidentiality of individuals\u2019 personal information. Traditional MRI de-identification methods remove privacy-sensitive parts (e.g. eyes, nose etc.) from a given scan. This comes at the expense of introducing a domain shift that can throw off downstream analyses. In this work, we propose CP-MAE, a model that de-identifies the face by remodeling it (e.g. changing the face) rather than by removing parts using masked autoencoders.\nCP-MAE outperforms all previous approaches in terms of downstream task performance as well as de-identification. 
\nWith our method we are able to synthesize high-fidelity scans of resolution up to 256^3 on the ADNI and OASIS-3 datasets \u2013 compared to 128^3 with previous approaches \u2013 which constitutes an eight-fold increase in the number of voxels.", "title":"Privacy Protection in MRI Scans Using 3D Masked Autoencoders", "authors":[ "Van der Goten, Lennart A.", "Smith, Kevin" ], "id":"Conference", "arxiv_id":"2310.15778", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":518 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2268_paper.pdf", "bibtext":"@InProceedings{ Qiu_Leveraging_MICCAI2024,\n author = { Qiu, Jingna and Aubreville, Marc and Wilm, Frauke and \u00d6ttl, Mathias and Utz, Jonas and Schlereth, Maja and Breininger, Katharina },\n title = { { Leveraging Image Captions for Selective Whole Slide Image Annotation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Acquiring annotations for whole slide images (WSIs)-based deep learning tasks, such as creating tissue segmentation masks or detecting mitotic figures, is a laborious process due to the extensive image size and the significant manual work involved in the annotation. This paper focuses on identifying and annotating specific image regions that optimize model training, given a limited annotation budget. While random sampling helps capture data variance by collecting annotation regions throughout the WSIs, insufficient data curation may result in an inadequate representation of minority classes. Recent studies proposed diversity sampling to select a set of regions that maximally represent unique characteristics of the WSIs. This is done by pretraining on unlabeled data through self-supervised learning and then clustering all regions in the latent space. However, establishing the optimal number of clusters can be difficult and not all clusters are task-relevant. This paper presents prototype sampling, a new method for annotation region selection. It discovers regions exhibiting typical characteristics of each task-specific class. The process entails recognizing class prototypes from extensive histopathology image-caption databases and detecting unlabeled image regions that resemble these prototypes. Our results show that prototype sampling is more effective than random and diversity sampling in identifying annotation regions with valuable training information, resulting in improved model performance in semantic segmentation and mitotic figure detection tasks. 
Code is available at https:\/\/github.com\/DeepMicroscopy\/Prototype-sampling.", "title":"Leveraging Image Captions for Selective Whole Slide Image Annotation", "authors":[ "Qiu, Jingna", "Aubreville, Marc", "Wilm, Frauke", "\u00d6ttl, Mathias", "Utz, Jonas", "Schlereth, Maja", "Breininger, Katharina" ], "id":"Conference", "arxiv_id":"2407.06363", "GitHub":[ "https:\/\/github.com\/DeepMicroscopy\/Prototype-sampling" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":519 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0380_paper.pdf", "bibtext":"@InProceedings{ Wan_Latent_MICCAI2024,\n author = { Wang, Edward and Au, Ryan and Lang, Pencilla and Mattonen, Sarah A. },\n title = { { Latent Spaces Enable Transformer-Based Dose Prediction in Complex Radiotherapy Plans } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Evidence is accumulating in favour of using stereotactic ablative body radiotherapy (SABR) to treat multiple cancer lesions in the lung. Multi-lesion lung SABR plans are complex and require significant resources to create. In this work, we propose a novel two-stage latent transformer framework (LDFormer) for dose prediction of lung SABR plans with varying numbers of lesions. In the first stage, patient anatomical information and the dose distribution are encoded into a latent space. In the second stage, a transformer learns to predict the dose latent from the anatomical latents. Causal attention is modified to adapt to different numbers of lesions. LDFormer outperforms a state-of-the-art generative adversarial network on dose conformality in and around lesions, and the performance gap widens when considering overlapping lesions. LDFormer generates predictions of 3-D dose distributions in under 30s on consumer hardware, and has the potential to assist physicians with clinical decision making, reduce resource costs, and accelerate treatment planning.", "title":"Latent Spaces Enable Transformer-Based Dose Prediction in Complex Radiotherapy Plans", "authors":[ "Wang, Edward", "Au, Ryan", "Lang, Pencilla", "Mattonen, Sarah A." 
], "id":"Conference", "arxiv_id":"2407.08650", "GitHub":[ "https:\/\/github.com\/edwardwang1\/LDFormer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":520 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3643_paper.pdf", "bibtext":"@InProceedings{ Buj_Seeing_MICCAI2024,\n author = { Bujny, Mariusz and Jesionek, Katarzyna and Nalepa, Jakub and Bartczak, Tomasz and Miszalski-Jamka, Karol and Kostur, Marcin },\n title = { { Seeing the Invisible: On Aortic Valve Reconstruction in Non-Contrast CT } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate segmentation of the aortic valve (AV) in computed tomography (CT) scans is crucial for assessing AV disease severity and identifying patients who may benefit from interventional treatments, such as surgical and percutaneous procedures. Evaluation of AV calcium score on non-contrast CT scans emphasizes the importance of identifying AV from these scans. However, it is not a trivial task due to the extremely low visibility of AV in this type of medical images. In this paper, we propose a method for semi-automatic generation of Ground Truth (GT) data for this problem based on image registration. In a weakly-supervised learning process, we train neural network models capable of accurate segmentation of AV based exclusively on non-contrast CT scans. We also present a novel approach for the evaluation of segmentation accuracy, based on per-patient, rigid registration of masks segmented in contrast and non-contrast images. Evaluation on an open-source dataset demonstrates that our model can identify AV with a mean error of less than 1 mm, suggesting significant potential for clinical application. In particular, the model can be used to enhance end-to-end deep learning approaches for AV calcium scoring by offering substantial accuracy improvements and increasing the explainability. Furthermore, it contributes to lowering the rate of false positives in coronary artery calcium scoring through the meticulous exclusion of aortic root calcifications.", "title":"Seeing the Invisible: On Aortic Valve Reconstruction in Non-Contrast CT", "authors":[ "Bujny, Mariusz", "Jesionek, Katarzyna", "Nalepa, Jakub", "Bartczak, Tomasz", "Miszalski-Jamka, Karol", "Kostur, Marcin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":521 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3852_paper.pdf", "bibtext":"@InProceedings{ Cho_Misjudging_MICCAI2024,\n author = { Cho, Sue Min and Taylor, Russell H. 
and Unberath, Mathias },\n title = { { Misjudging the Machine: Gaze May Forecast Human-Machine Team Performance in Surgery } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"In human-centered assurance, an emerging field in technology-assisted surgery, humans assess algorithmic outputs by interpreting the provided information. Focusing on image-based registration, we investigate whether gaze patterns can predict the efficacy of human-machine collaboration. Gaze data is collected during a user study to assess 2D\/3D registration results with different visualization paradigms. We then comprehensively examine gaze metrics (fixation count, fixation duration, stationary gaze entropy, and gaze transition entropy) and their relationship with assessment error. We also test the effect of visualization paradigms on different gaze metrics. There is a significant negative correlation between assessment error and both fixation count and fixation duration; increased fixation counts or duration are associated with lower assessment errors. Neither stationary gaze entropy nor gaze transition entropy displays a significant relationship with assessment error. Notably, visualization paradigms demonstrate a significant impact on all four gaze metrics. Gaze metrics hold potential as predictors for human-machine performance. The importance and impact of various gaze metrics require further task-specific exploration. Our analyses emphasize that the presentation of visual information crucially influences user perception.", "title":"Misjudging the Machine: Gaze May Forecast Human-Machine Team Performance in Surgery", "authors":[ "Cho, Sue Min", "Taylor, Russell H.", "Unberath, Mathias" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":522 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2349_paper.pdf", "bibtext":"@InProceedings{ Zen_Reliable_MICCAI2024,\n author = { Zeng, Hongye and Zou, Ke and Chen, Zhihao and Zheng, Rui and Fu, Huazhu },\n title = { { Reliable Source Approximation: Source-Free Unsupervised Domain Adaptation for Vestibular Schwannoma MRI Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Source-Free Unsupervised Domain Adaptation (SFUDA) has recently become a focus in the medical image domain adaptation, as it only utilizes the source model and does not require annotated target data. However, current SFUDA approaches cannot tackle the complex segmentation task across different MRI sequences, such as the vestibular schwannoma segmentation. To address this problem, we proposed Reliable Source Approximation (RSA), which can generate source-like and structure-preserved images from the target domain for updating model parameters and adapting domain shifts. Specifically, RSA deploys a conditional diffusion model to generate multiple source-like images under the guidance of varying edges of one target image. 
An uncertainty estimation module is then introduced to predict and refine reliable pseudo labels of generated images, and the prediction consistency is developed to select the most reliable generations. Subsequently, all reliable generated images and their pseudo labels are utilized to update the model. Our RSA is validated on vestibular schwannoma segmentation across multi-modality MRI. The experimental results demonstrate that RSA consistently improves domain adaptation performance over other state-of-the-art SFUDA methods. \\textbf{We will release all codes for reproduction after acceptance.}", "title":"Reliable Source Approximation: Source-Free Unsupervised Domain Adaptation for Vestibular Schwannoma MRI Segmentation", "authors":[ "Zeng, Hongye", "Zou, Ke", "Chen, Zhihao", "Zheng, Rui", "Fu, Huazhu" ], "id":"Conference", "arxiv_id":"2405.16102", "GitHub":[ "https:\/\/github.com\/zenghy96\/Reliable-Source-Approximation" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":523 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0827_paper.pdf", "bibtext":"@InProceedings{ Liu_LGS_MICCAI2024,\n author = { Liu, Hengyu and Liu, Yifan and Li, Chenxin and Li, Wuyang and Yuan, Yixuan },\n title = { { LGS: A Light-weight 4D Gaussian Splatting for Efficient Surgical Scene Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"The advent of 3D Gaussian Splatting (3D-GS) techniques and their dynamic scene modeling variants, 4D-GS, offers promising prospects for real-time rendering of dynamic surgical scenarios. However, the prerequisite for modeling dynamic scenes by a large number of Gaussian units, the high-dimensional Gaussian attributes and the high-resolution deformation fields, all lead to serve storage issues that hinder real-time rendering in resource-limited surgical equipment. To surmount these limitations, we introduce a Lightweight 4D Gaussian Splatting framework (LGS) that can liberate the efficiency bottlenecks of both rendering and storage for dynamic endoscopic reconstruction. Specifically, to minimize the redundancy of Gaussian quantities, we propose Deformation-Aware Pruning by gauging the impact of each Gaussian on deformation. Concurrently, to reduce the redundancy of Gaussian attributes, we simplify the representation of textures and lighting in non-crucial areas by pruning the dimensions of Gaussian attributes. We further resolve the feature field redundancy caused by the high resolution of 4D neural spatiotemporal encoder for modeling dynamic scenes via a 4D feature field condensation. Experiments on public benchmarks demonstrate the efficacy of LGS in terms of a compression rate exceeding 9 times while maintaining the pleasing visual quality and real-time rendering efficiency. LGS confirms a substantial step towards its application in robotic surgical services. 
Project page: https:\/\/lgs-endo.github.io\/.", "title":"LGS: A Light-weight 4D Gaussian Splatting for Efficient Surgical Scene Reconstruction", "authors":[ "Liu, Hengyu", "Liu, Yifan", "Li, Chenxin", "Li, Wuyang", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"2406.16073", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/LGS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":524 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1486_paper.pdf", "bibtext":"@InProceedings{ Thi_Differentiable_MICCAI2024,\n author = { Thies, Mareike and Maul, Noah and Mei, Siyuan and Pfaff, Laura and Vysotskaya, Nastassia and Gu, Mingxuan and Utz, Jonas and Possart, Dennis and Folle, Lukas and Wagner, Fabian and Maier, Andreas },\n title = { { Differentiable Score-Based Likelihoods: Learning CT Motion Compensation From Clean Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Motion artifacts can compromise the diagnostic value of computed tomography (CT) images. Motion correction approaches require a per-scan estimation of patient-specific motion patterns. In this work, we train a score-based model to act as a probability density estimator for clean head CT images. Given the trained model, we quantify the deviation of a given motion-affected CT image from the ideal distribution through likelihood computation. We demonstrate that the likelihood can be utilized as a surrogate metric for motion artifact severity in the CT image facilitating the application of an iterative, gradient-based motion compensation algorithm. By optimizing the underlying motion parameters to maximize likelihood, our method effectively reduces motion artifacts, bringing the image closer to the distribution of motion-free scans. Our approach achieves comparable performance to state-of-the-art methods while eliminating the need for a representative data set of motion-affected samples. This is particularly advantageous in real-world applications, where patient motion patterns may exhibit unforeseen variability, ensuring robustness without implicit assumptions about recoverable motion types.", "title":"Differentiable Score-Based Likelihoods: Learning CT Motion Compensation From Clean Images", "authors":[ "Thies, Mareike", "Maul, Noah", "Mei, Siyuan", "Pfaff, Laura", "Vysotskaya, Nastassia", "Gu, Mingxuan", "Utz, Jonas", "Possart, Dennis", "Folle, Lukas", "Wagner, Fabian", "Maier, Andreas" ], "id":"Conference", "arxiv_id":"2404.14747", "GitHub":[ "https:\/\/github.com\/mareikethies\/moco_diff_likelihood" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":525 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0399_paper.pdf", "bibtext":"@InProceedings{ Guo_MMSummary_MICCAI2024,\n author = { Guo, Xiaoqing and Men, Qianhui and Noble, J. 
Alison },\n title = { { MMSummary: Multimodal Summary Generation for Fetal Ultrasound Video } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present the first automated multimodal summary generation system, MMSummary, for medical imaging video, particularly with a focus on fetal ultrasound analysis. Imitating the examination process performed by a human sonographer, MMSummary is designed as a three-stage pipeline, progressing from keyframe detection to keyframe captioning and finally anatomy segmentation and measurement. In the keyframe detection stage, an innovative automated workflow is proposed to progressively select a concise set of keyframes, preserving sufficient video information without redundancy. Subsequently, we adapt a large language model to generate meaningful captions for fetal ultrasound keyframes in the keyframe captioning stage. If a keyframe is captioned as fetal biometry, the segmentation and measurement stage estimates biometric parameters by segmenting the region of interest according to the textual prior. The MMSummary system provides comprehensive summaries for fetal ultrasound examinations and based on reported experiments is estimated to reduce scanning time by approximately 31.5%, thereby suggesting the potential to enhance clinical workflow efficiency.", "title":"MMSummary: Multimodal Summary Generation for Fetal Ultrasound Video", "authors":[ "Guo, Xiaoqing", "Men, Qianhui", "Noble, J. Alison" ], "id":"Conference", "arxiv_id":"2408.03761", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":526 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0310_paper.pdf", "bibtext":"@InProceedings{ Pei_DepthDriven_MICCAI2024,\n author = { Pei, Jialun and Cui, Ruize and Li, Yaoqian and Si, Weixin and Qin, Jing and Heng, Pheng-Ann },\n title = { { Depth-Driven Geometric Prompt Learning for Laparoscopic Liver Landmark Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Laparoscopic liver surgery poses a complex intraoperative dynamic environment for surgeons, where remains a significant challenge to distinguish critical or even hidden structures inside the liver.\nLiver anatomical landmarks, e.g., ridge and ligament, serve as important markers for 2D-3D alignment, which can significantly enhance the spatial perception of surgeons for precise surgery. To facilitate the detection of laparoscopic liver landmarks, we collect a novel dataset called L3D, which comprises 1,152 frames with elaborated landmark annotations from surgical videos of 39 patients across two medical sites. For benchmarking purposes, 12 mainstream detection methods are selected and comprehensively evaluated on L3D. Further, we propose a depth-driven geometric prompt learning network, namely D2GPLand. 
Specifically, we design a Depth-aware Prompt Embedding (DPE) module that is guided by self-supervised prompts and generates semantically relevant geometric information with the benefit of global depth cues extracted from SAM-based features. Additionally, a Semantic-specific Geometric Augmentation (SGA) scheme is introduced to efficiently merge RGB-D spatial and geometric information through reverse anatomic perception. The experimental results indicate that D2GPLand obtains state-of-the-art performance on L3D, with 63.52% DICE and 48.68% IoU scores. Together with 2D-3D fusion technology, our method can directly provide the surgeon with intuitive guidance information in laparoscopic scenarios. Our code and dataset are available at https:\/\/github.com\/PJLallen\/D2GPLand.", "title":"Depth-Driven Geometric Prompt Learning for Laparoscopic Liver Landmark Detection", "authors":[ "Pei, Jialun", "Cui, Ruize", "Li, Yaoqian", "Si, Weixin", "Qin, Jing", "Heng, Pheng-Ann" ], "id":"Conference", "arxiv_id":"2406.17858", "GitHub":[ "https:\/\/github.com\/PJLallen\/D2GPLand" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":527 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2723_paper.pdf", "bibtext":"@InProceedings{ He_mQSM_MICCAI2024,\n author = { He, Junjie and Fu, Bangkang and Xiong, Zhenliang and Peng, Yunsong and Wang, Rongpin },\n title = { { mQSM: Multitask Learning-based Quantitative Susceptibility Mapping for Iron Analysis in Brain } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Quantitative analysis of brain iron is widely utilized in neurodegenerative diseases, typically accomplished through the utilization of quantitative susceptibility mapping (QSM) and medical image registration. However, this approach heavily relies on registration accuracy, and image registration can alter QSM values, leading to distorted quantitative analysis results. This paper proposes a multi-modal multitask QSM reconstruction algorithm (mQSM) and introduces a mutual Transformer mechanism (mTrans) to efficiently fuse multi-modal information for QSM reconstruction and brain region segmentation tasks. mTrans leverages Transformer computations on Query and Value feature matrices for mutual attention calculation, eliminating the need for additional computational modules and ensuring high efficiency in multi-modal data fusion. Experimental results demonstrate an average dice coefficient of 0.92 for segmentation, and QSM reconstruction achieves an SSIM evaluation of 0.9854 compared to the gold standard. Moreover, segmentation-based (mQSM) brain iron quantitative analysis shows no significant difference from the ground truth, whereas the registration-based approach exhibits notable differences in brain cortical regions compared to the ground truth. 
Our code is available at https:\/\/github.com\/TyrionJ\/mQSM.", "title":"mQSM: Multitask Learning-based Quantitative Susceptibility Mapping for Iron Analysis in Brain", "authors":[ "He, Junjie", "Fu, Bangkang", "Xiong, Zhenliang", "Peng, Yunsong", "Wang, Rongpin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/TyrionJ\/mQSM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":528 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2514_paper.pdf", "bibtext":"@InProceedings{ Zha_ACurvatureGuided_MICCAI2024,\n author = { Zhao, Fenqiang and Tang, Yuxing and Lu, Le and Zhang, Ling },\n title = { { A Curvature-Guided Coarse-to-Fine Framework for Enhanced Whole Brain Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Whole brain segmentation, which divides the entire brain volume into anatomically labeled regions of interest (ROIs), is a crucial step in brain image analysis. Traditional methods often rely on intricate pipelines that, while accurate, are time-consuming and require expertise due to their complexity. Alternatively, end-to-end deep learning methods offer rapid whole brain segmentation but often sacrifice accuracy due to neglect of geometric features. In this paper, we propose a novel framework that integrates the key curvature feature, previously utilized by complex surface-based pipelines but overlooked by volume-based methods, into deep neural networks, thereby achieving both high accuracy and efficiency. Specifically, we first train a coarse anatomical segmentation model focusing on high-contrast tissue types, i.e., white matter (WM), gray matter (GM), and subcortical regions. Next, we reconstruct the cortical surfaces using the WM\/GM interface and compute curvature features for each vertex on the surfaces. These curvature features are then mapped back to the image space, where they are combined with intensity features to train a finer cortical parcellation model. We also simplify the process of cortical surface reconstruction and curvature computation, thereby enhancing the overall efficiency of the framework. Additionally, our framework is flexible and can incorporate any neural network as its backbone. It can serve as a plug-and-play component to enhance the whole brain segmentation results of any segmentation network. 
Experimental results on the public Mindboggle-101 dataset demonstrate improved segmentation performance with comparable speed compared to various deep learning methods.", "title":"A Curvature-Guided Coarse-to-Fine Framework for Enhanced Whole Brain Segmentation", "authors":[ "Zhao, Fenqiang", "Tang, Yuxing", "Lu, Le", "Zhang, Ling" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":529 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2088_paper.pdf", "bibtext":"@InProceedings{ Ran_DeepRepViz_MICCAI2024,\n author = { Rane, Roshan Prakash and Kim, JiHoon and Umesha, Arjun and Stark, Didem and Schulz, Marc-Andr\u00e9 and Ritter, Kerstin },\n title = { { DeepRepViz: Identifying potential confounders in deep learning model predictions } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep Learning (DL) has emerged as a powerful tool in neuroimaging research. DL models predicting brain pathologies, psychological behaviors, and cognitive traits from neuroimaging data have the potential to discover the neurobiological basis of these phenotypes. However, these models can be biased from information related to age, sex, or spurious imaging artifacts encoded in the neuroimaging data. \nIn this study, we introduce a lightweight and easy-to-use framework called \u2018DeepRepViz\u2019 designed to detect such potential confounders in DL model predictions and enhance the transparency of predictive DL models. DeepRepViz comprises two components - an online visualization tool (available at https:\/\/deep-rep-viz.vercel.app\/) and a metric called the \u2018Con-score\u2019. The tool enables researchers to visualize the final latent representation of their DL model and qualitatively inspect it for biases. The Con-score, or the `concept encoding\u2019 score, quantifies the extent to which potential confounders like sex or age are encoded in the final latent representation and influences the model predictions. We illustrate the rationale of the Con-score formulation using a simulation experiment.\nNext, we demonstrate the utility of the DeepRepViz framework by applying it to three typical neuroimaging-based prediction tasks (n=12000). These include (a) distinguishing chronic alcohol users from controls, (b) classifying sex, and (c) predicting the speed of completing a cognitive task known as \u2018trail making\u2019. \nIn the DL model predicting chronic alcohol users, DeepRepViz uncovers a strong influence of sex on the predictions (Con-score=0.35). In the model predicting cognitive task performance, DeepRepViz reveals that age plays a major role (Con-score=0.3). 
Thus, the DeepRepViz framework enables neuroimaging researchers to systematically examine their model and identify potential biases, thereby improving the transparency of predictive DL models in neuroimaging studies.", "title":"DeepRepViz: Identifying potential confounders in deep learning model predictions", "authors":[ "Rane, Roshan Prakash", "Kim, JiHoon", "Umesha, Arjun", "Stark, Didem", "Schulz, Marc-Andr\u00e9", "Ritter, Kerstin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ritterlab\/DeepRepViz" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":530 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1527_paper.pdf", "bibtext":"@InProceedings{ Mur_Class_MICCAI2024,\n author = { Murugesan, Balamurali and Silva-Rodriguez, Julio and Ben Ayed, Ismail and Dolz, Jose },\n title = { { Class and Region-Adaptive Constraints for Network Calibration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this work, we present a novel approach to calibrate segmentation networks that considers the inherent challenges posed by different categories and object regions. In particular, we present a formulation that integrates class and region-wise constraints into the learning objective, with multiple penalty weights to account for class and region differences. Finding the optimal penalty weights manually, however, might be unfeasible, and potentially hinder the optimization process. To overcome this limitation, we propose an approach based on Class and Region-Adaptive constraints (CRaC), which allows to learn the class and region-wise penalty weights during training. CRaC is based on a general Augmented Lagrangian method, a well-established technique in constrained optimization. Experimental results on two popular segmentation benchmarks, and two well-known segmentation networks, demonstrate the superiority of CRaC compared to existing approaches. 
The code is available at: https:\/\/github.com\/Bala93\/CRac\/", "title":"Class and Region-Adaptive Constraints for Network Calibration", "authors":[ "Murugesan, Balamurali", "Silva-Rodriguez, Julio", "Ben Ayed, Ismail", "Dolz, Jose" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Bala93\/CRac\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":531 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2376_paper.pdf", "bibtext":"@InProceedings{ Zha_Fuzzy_MICCAI2024,\n author = { Zhang, Sheng and Nan, Yang and Fang, Yingying and Wang, Shiyi and Xing, Xiaodan and Gao, Zhifan and Yang, Guang },\n title = { { Fuzzy Attention-based Border Rendering Network for Lung Organ Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automatic lung organ segmentation on CT images is crucial for lung disease diagnosis. However, the unlimited voxel values and class imbalance of lung organs can lead to false-negative\/positive and leakage issues in advanced methods. Additionally, some slender lung organs are easily lost during the recycled down\/up-sample procedure, e.g., bronchioles & arterioles, causing severe discontinuity issue. Inspired by these, this paper introduces an effective lung organ segmentation method called Fuzzy Attention-based Border Rendering (FABR) network. Since fuzzy logic can handle the uncertainty in feature extraction, hence the fusion of deep networks and fuzzy sets should be a viable solution for better performance. Meanwhile, unlike prior top-tier methods that operate on all regular dense points, our FABR depicts lung organ regions as cube-trees, focusing only on recycle-sampled border vulnerable points, rendering the severely discontinuous, false-negative\/positive organ regions with a novel Global-Local Cube-tree Fusion (GLCF) module. 
All experimental results, on four challenging datasets of airway & artery, demonstrate that our method can achieve the favorable performance significantly.", "title":"Fuzzy Attention-based Border Rendering Network for Lung Organ Segmentation", "authors":[ "Zhang, Sheng", "Nan, Yang", "Fang, Yingying", "Wang, Shiyi", "Xing, Xiaodan", "Gao, Zhifan", "Yang, Guang" ], "id":"Conference", "arxiv_id":"2406.16189", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":532 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2558_paper.pdf", "bibtext":"@InProceedings{ Hou_QualityAware_MICCAI2024,\n author = { Hou, Tao and Huang, Jiashuang and Jiang, Shu and Ding, Weiping },\n title = { { Quality-Aware Fuzzy Min-Max Neural Networks for Dynamic Brain Network Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Dynamic functional connections (dFCs) have been widely used for the diagnosis of brain diseases. However, current dynamic brain network analysis methods ignore the fuzzy information of the brain network and the uncertainty arising from the inconsistent data quality of different windows, providing unreliable integration for multiple windows. In this paper, we propose a dynamic brain network analysis method based on quality-aware fuzzy min-max neural networks (QFMMNet). The individual window of dFCs is treated as a view, and we define three convolution filters to extract features from the brain network under the multi-view learning framework, thereby obtaining multi-view evidence for dFCs. We design multi-view fuzzy min-max neural networks (MFMM) based on fuzzy sets to deal with the fuzzy information of the brain network, which takes evidence as input patterns to generate hyperboxes and serves as the classification layer of each view. A quality-aware ensemble module is introduced to deal with uncertainty, which employs D-S theory to directly model the uncertainty and evaluate the dynamic quality-aware weighting of each view. Experiments on two real schizophrenia datasets demonstrate the effectiveness and advantages of our proposed method. 
Our codes are available at https:\/\/github.com\/scurrytao\/QFMMNet.", "title":"Quality-Aware Fuzzy Min-Max Neural Networks for Dynamic Brain Network Analysis", "authors":[ "Hou, Tao", "Huang, Jiashuang", "Jiang, Shu", "Ding, Weiping" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/scurrytao\/QFMMNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":533 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2184_paper.pdf", "bibtext":"@InProceedings{ Wan_TriPlane_MICCAI2024,\n author = { Wang, Hualiang and Lin, Yiqun and Ding, Xinpeng and Li, Xiaomeng },\n title = { { Tri-Plane Mamba: Efficiently Adapting Segment Anything Model for 3D Medical Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"General networks for 3D medical image segmentation have recently undergone extensive exploration. Behind the exceptional performance of these networks lies a significant demand for a large volume of pixel-level annotated data, which is time-consuming and labor-intensive. The emergence of the Segment Anything Model (SAM) has enabled this model to achieve superior performance in 2D medical image segmentation tasks via parameter- and data-efficient feature adaptation. However, the introduction of additional depth channels in 3D medical images not only prevents the sharing of 2D pre-trained features but also results in a quadratic increase in the computational cost for adapting SAM.\nTo overcome these challenges, we present the \\textbf{T}ri-\\textbf{P}lane \\textbf{M}amba (TP-Mamba) adapters tailored for the SAM, featuring two major innovations: 1) multi-scale 3D convolutional adapters, optimized for efficiently processing local depth-level information, 2) a tri-plane mamba module, engineered to capture long-range depth-level representation without significantly increasing computational costs.\nThis approach achieves state-of-the-art performance in 3D CT organ segmentation tasks. Remarkably, this superior performance is maintained even with scarce training data. 
Specifically using only three CT training samples from the BTCV dataset, it surpasses conventional 3D segmentation networks, attaining a Dice score that is up to 12% higher.", "title":"Tri-Plane Mamba: Efficiently Adapting Segment Anything Model for 3D Medical Images", "authors":[ "Wang, Hualiang", "Lin, Yiqun", "Ding, Xinpeng", "Li, Xiaomeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xmed-lab\/TP-Mamba" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":534 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2758_paper.pdf", "bibtext":"@InProceedings{ Hwa_Multiorder_MICCAI2024,\n author = { Hwang, Yechan and Hwang, Soojin and Wu, Guorong and Kim, Won Hwa },\n title = { { Multi-order Simplex-based Graph Neural Network for Brain Network Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"A brain network is defined by wiring anatomical regions in the brain with structural and functional relationships. It has an intricate topology with handful early features\/biomarkers of neurodegenerative diseases, which emphasize the importance of analyzing connectomic features alongside region-wise assessments. Various graph neural network (GNN) approaches have been developed for brain network analysis, however, they mainly focused on node-centric analyses often treating edge features as an auxiliary information (i.e., adjacency matrix) to enhance node representations. In response, we propose a method that explicitly learns node and edge embeddings for brain network analysis. Introducing a dual aggregation framework, our model incorporates a novel spatial graph convolution layer with an incidence matrix. Enabling concurrent node-wise and edge-wise information aggregation for both nodes and edges, this framework captures the intricate node-edge relationships within the brain. Demonstrating superior performance on the Alzheimer\u2019s Disease Neuroimaging Initiative (ADNI) dataset, our model effectively handles the complex topology of brain networks. 
Furthermore, our model yields interpretable results with Grad-CAM, selectively identifying brain Regions of Interest (ROIs) and connectivities associated with AD, aligning with prior AD literature.", "title":"Multi-order Simplex-based Graph Neural Network for Brain Network Analysis", "authors":[ "Hwang, Yechan", "Hwang, Soojin", "Wu, Guorong", "Kim, Won Hwa" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":535 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2173_paper.pdf", "bibtext":"@InProceedings{ Kim_LLMguided_MICCAI2024,\n author = { Kim, Kyungwon and Lee, Yongmoon and Park, Doohyun and Eo, Taejoon and Youn, Daemyung and Lee, Hyesang and Hwang, Dosik },\n title = { { LLM-guided Multi-modal Multiple Instance Learning for 5-year Overall Survival Prediction of Lung Cancer } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurately predicting the 5-year prognosis of lung cancer patients is crucial for guiding treatment planning and providing optimal patient care. Traditional methods relying on CT image-based cancer stage assessment and morphological analysis of cancer cells in pathology images have encountered challenges in terms of reliability and accuracy due to the complexity and diversity of information within these images.\nRecent rapid advancements in deep learning have shown promising performance in prognosis prediction, however utilizing CT and pathology images independently is limited by their differing imaging characteristics and the unique prognostic information.\nTo effectively address these challenges, this study proposes a novel framework that integrates prognostic capabilities of both CT and pathology images with clinical information, employing a multi-modal integration approach via multiple instance learning, leveraging large language models (LLMs) to analyze clinical notes and align them with image modalities.\nThe proposed approach was rigorously validated using external datasets from different hospitals, demonstrating superior performance over models reliant on vision or clinical data alone. 
This highlights the adaptability and strength of LLMs in managing complex multi-modal medical datasets for lung cancer prognosis, marking a significant advance towards more accurate and comprehensive patient care strategies.", "title":"LLM-guided Multi-modal Multiple Instance Learning for 5-year Overall Survival Prediction of Lung Cancer", "authors":[ "Kim, Kyungwon", "Lee, Yongmoon", "Park, Doohyun", "Eo, Taejoon", "Youn, Daemyung", "Lee, Hyesang", "Hwang, Dosik" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/KyleKWKim\/LLM-guided-Multimodal-MIL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":536 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0560_paper.pdf", "bibtext":"@InProceedings{ Kim_Parameter_MICCAI2024,\n author = { Kim, Yumin and Choi, Gayoon and Hwang, Seong Jae },\n title = { { Parameter Efficient Fine Tuning for Multi-scanner PET to PET Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Reducing scan time in Positron Emission Tomography (PET) imaging while maintaining high-quality images is crucial for minimizing patient discomfort and radiation exposure. Due to the limited size of datasets and distribution discrepancy across scanners in medical imaging, fine-tuning in a parameter-efficient and effective manner is on the rise. Motivated by the potential of Parameter Efficient Fine-Tuning (PEFT), we aim to address these issues by effectively leveraging PEFT to improve limited data and GPU resource issues in multi-scanner setups. In this paper, we introduce PETITE, Parameter Efficient Fine-Tuning for MultI-scanner PET to PET REconstruction, which represents the optimal PEFT combination when independently applying encoder-decoder components to each model architecture. To the best of our knowledge, this study is the first to systematically explore the efficacy of diverse PEFT techniques in medical imaging reconstruction tasks via prevalent encoder-decoder models. This investigation, in particular, brings intriguing insights into PETITE as we show further improvements by treating the encoder and decoder separately and mixing different PEFT methods, namely, Mix-PEFT. Using multi-scanner PET datasets comprised of five different scanners, we extensively test the cross-scanner PET scan time reduction performances (i.e., a model pre-trained on one scanner is fine-tuned on a different scanner) of 21 feasible Mix-PEFT combinations to derive optimal PETITE. We show that training with less than 1% parameters using PETITE performs on par with full fine-tuning (i.e., 100% parameter). 
Code is available at: https:\/\/github.com\/MICV-yonsei\/PETITE", "title":"Parameter Efficient Fine Tuning for Multi-scanner PET to PET Reconstruction", "authors":[ "Kim, Yumin", "Choi, Gayoon", "Hwang, Seong Jae" ], "id":"Conference", "arxiv_id":"2407.07517", "GitHub":[ "https:\/\/github.com\/MICV-yonsei\/PETITE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":537 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3161_paper.pdf", "bibtext":"@InProceedings{ Gen_Force_MICCAI2024,\n author = { Geng, Yimeng and Meng, Gaofeng and Chen, Mingcong and Cao, Guanglin and Zhao, Mingyang and Zhao, Jianbo and Liu, Hongbin },\n title = { { Force Sensing Guided Artery-Vein Segmentation via Sequential Ultrasound Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate identification of arteries and veins in ultrasound images is crucial for vascular examinations and interventions in robotics-assisted surgeries. However, current methods for ultrasound vessel segmentation face challenges in distinguishing between arteries and veins due to their morphological similarities. To address this challenge, this study introduces a novel force sensing guided segmentation approach to enhance artery-vein segmentation accuracy by leveraging their distinct deformability. Our proposed method utilizes force magnitude to identify key frames with the most significant vascular deformation in a sequence of ultrasound images. These key frames are then integrated with the current frame through attention mechanisms, with weights assigned in accordance with force magnitude. Our proposed force sensing guided framework can be seamlessly integrated into various segmentation networks and achieves significant performance improvements in multiple U-shaped networks such as U-Net, Swin-unet and Transunet. Furthermore, we contribute the first multimodal ultrasound artery-vein segmentation dataset, Mus-V, which encompasses both force and image data simultaneously. The dataset comprises 3114 ultrasound images of carotid and femoral vessels extracted from 105 videos, with corresponding force data recorded by the force sensor mounted on the US probe. 
The code and dataset can be available at https:\/\/www.kaggle.com\/datasets\/among22\/multimodal-ultrasound-vascular-segmentation", "title":"Force Sensing Guided Artery-Vein Segmentation via Sequential Ultrasound Images", "authors":[ "Geng, Yimeng", "Meng, Gaofeng", "Chen, Mingcong", "Cao, Guanglin", "Zhao, Mingyang", "Zhao, Jianbo", "Liu, Hongbin" ], "id":"Conference", "arxiv_id":"2407.21394", "GitHub":[ "https:\/\/github.com\/evelynskip\/artery-vein-segmentation" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":538 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0661_paper.pdf", "bibtext":"@InProceedings{ Tan_OSALND_MICCAI2024,\n author = { Tang, Jiao and Yue, Yagao and Wan, Peng and Wang, Mingliang and Zhang, Daoqiang and Shao, Wei },\n title = { { OSAL-ND: Open-set Active Learning for Nucleus Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"The recent advance of deep learning has shown promising power for nucleus detection that plays an important role in histopathological examination. However, such accurate and reliable deep learning models need enough labeled data for training, which makes active learning (AL) an attractive learning paradigm for reducing the annotation efforts by pathologists. In open-set environments, AL encounters the challenge that the unlabeled data usually contains non-target samples from the unknown classes, resulting in the failure of most AL methods. Although AL has been explored in many open-set classification tasks, research on AL for nucleus detection in the open-set environment remains unexplored. To address the above issues, we propose a two-stage AL framework designed for nucleus detection in an open-set environment (i.e., OSAL-ND). In the first stage, we propose a prototype-based query strategy based on the auxiliary detector to select a candidate set from known classes as pure as possible. In the second stage, we further query the most uncertain samples from the candidate set for the nucleus detection task relying on the target detector. We evaluate the performance of our method on the NuCLS dataset, and the experimental results indicate that our method can not only improve the selection quality on the known classes, but also achieve higher detection accuracy with lower annotation burden in comparison with the existing studies.", "title":"OSAL-ND: Open-set Active Learning for Nucleus Detection", "authors":[ "Tang, Jiao", "Yue, Yagao", "Wan, Peng", "Wang, Mingliang", "Zhang, Daoqiang", "Shao, Wei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":539 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2329_paper.pdf", "bibtext":"@InProceedings{ Grz_TabMixer_MICCAI2024,\n author = { Grzeszczyk, Michal K. and Korzeniowski, Przemys\u0142aw and Alabed, Samer and Swift, Andrew J. 
and Trzcin\u0301ski, Tomasz and Sitek, Arkadiusz },\n title = { { TabMixer: Noninvasive Estimation of the Mean Pulmonary Artery Pressure via Imaging and Tabular Data Mixing } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Right Heart Catheterization is a gold standard procedure for diagnosing Pulmonary Hypertension by measuring mean Pulmonary Artery Pressure (mPAP). It is invasive, costly, time-consuming and carries risks. In this paper, for the first time, we explore the estimation of mPAP from videos of noninvasive Cardiac Magnetic Resonance Imaging. To enhance the predictive capabilities of Deep Learning models used for this task, we introduce an additional modality in the form of demographic features and clinical measurements. Inspired by all-Multilayer Perceptron architectures, we present TabMixer, a novel module enabling the integration of imaging and tabular data through spatial, temporal and channel mixing. Specifically, we present the first approach that utilizes Multilayer Perceptrons to interchange tabular information with imaging features in vision models. We test TabMixer for mPAP estimation and show that it enhances the performance of Convolutional Neural Networks, 3D-MLP and Vision Transformers while being competitive with previous modules for imaging and tabular data. Our approach has the potential to improve clinical processes involving both modalities, particularly in noninvasive mPAP estimation, thus, significantly enhancing the quality of life for individuals affected by Pulmonary Hypertension. We provide a source code for using TabMixer at https:\/\/github.com\/SanoScience\/TabMixer.", "title":"TabMixer: Noninvasive Estimation of the Mean Pulmonary Artery Pressure via Imaging and Tabular Data Mixing", "authors":[ "Grzeszczyk, Michal K.", "Korzeniowski, Przemys\u0142aw", "Alabed, Samer", "Swift, Andrew J.", "Trzcin\u0301ski, Tomasz", "Sitek, Arkadiusz" ], "id":"Conference", "arxiv_id":"2409.07564", "GitHub":[ "https:\/\/github.com\/SanoScience\/TabMixer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":540 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2644_paper.pdf", "bibtext":"@InProceedings{ Wil_ProstNFound_MICCAI2024,\n author = { Wilson, Paul F. R. and To, Minh Nguyen Nhat and Jamzad, Amoon and Gilany, Mahdi and Harmanani, Mohamed and Elghareb, Tarek and Fooladgar, Fahimeh and Wodlinger, Brian and Abolmaesumi, Purang and Mousavi, Parvin },\n title = { { ProstNFound: Integrating Foundation Models with Ultrasound Domain Knowledge and Clinical Context for Robust Prostate Cancer Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Analysis of high-resolution micro-ultrasound data using deep learning presents a promising avenue for the accurate detection of prostate cancer (PCa). While previous efforts have focused on designing specialized architectures and training them from scratch, they are challenged by limited data availability. 
Medical foundation models, pre-trained on large and diverse datasets, offer a robust knowledge base that can be adapted to downstream tasks, reducing the need for large task specific datasets. However, their lack of specialized domain knowledge hinders their success: our initial research indicates that even with extensive fine-tuning, existing foundation models falls short of surpassing specialist models\u2019 performance for PCa detection. To address this gap, we propose ProstNFound, a method that empowers foundation models with domain-specific knowledge pertinent to ultrasound imaging and PCa. In this approach, while ultrasound images are fed to a foundation model, specialized auxiliary networks embed high-resolution textural features and clinical markers which are then presented to the network as prompts. Using a multi-center micro-ultrasound dataset with 693 patients, we demonstrate significant improvements over the state-of-the-art in PCa detection. ProstNFound achieves 90% sensitivity at 40% specificity, performance that is competitive with that of expert radiologists reading multi-parametric MRI or micro-ultrasound images, suggesting significant promise for clinical application. Our code will be made available at github.com.", "title":"ProstNFound: Integrating Foundation Models with Ultrasound Domain Knowledge and Clinical Context for Robust Prostate Cancer Detection", "authors":[ "Wilson, Paul F. R.", "To, Minh Nguyen Nhat", "Jamzad, Amoon", "Gilany, Mahdi", "Harmanani, Mohamed", "Elghareb, Tarek", "Fooladgar, Fahimeh", "Wodlinger, Brian", "Abolmaesumi, Purang", "Mousavi, Parvin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/pfrwilson\/prostNfound" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":541 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1932_paper.pdf", "bibtext":"@InProceedings{ Wan_Automated_MICCAI2024,\n author = { Wang, Jinge and Chen, Guilin and Wang, Xuefeng and Wu, Nan and Zhang, Terry Jianguo },\n title = { { Automated Robust Muscle Segmentation in Multi-level Contexts using a Probabilistic Inference Framework } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"The paraspinal muscles are crucial for spinal stability, which can be quantitatively analyzed through image segmentation. However, unclear muscle boundaries, severe deformations, and limited training data impose great challenges for existing automatic segmentation methods. This study proposes an automated probabilistic inference framework to reconstruct 3D muscle shapes from thick-slice MRI robustly. Leveraging Fourier basis functions and Gaussian processes, we construct anatomically interpretable shape models. Multi-level contextual observations such as global poses of muscle centroids and local edges are then integrated into posterior estimation to enhance shape model initialization and optimization. The proposed framework is characterized by its intuitive representations and smooth generation capabilities, demonstrating higher accuracy in validation on both public and clinical datasets compared to state-of-the-art methods. 
The outcomes can aid clinicians and researchers in understanding muscle changes in various conditions, potentially enhancing diagnoses and treatments.", "title":"Automated Robust Muscle Segmentation in Multi-level Contexts using a Probabilistic Inference Framework", "authors":[ "Wang, Jinge", "Chen, Guilin", "Wang, Xuefeng", "Wu, Nan", "Zhang, Terry Jianguo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":542 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1948_paper.pdf", "bibtext":"@InProceedings{ Bou_PhenDiff_MICCAI2024,\n author = { Bourou, Anis and Boyer, Thomas and Gheisari, Marzieh and Daupin, K\u00e9vin and Dubreuil, V\u00e9ronique and De Thonel, Aur\u00e9lie and Mezger, Val\u00e9rie and Genovesio, Auguste },\n title = { { PhenDiff: Revealing Subtle Phenotypes with Diffusion Models in Real Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"For the past few years, deep generative models have increasingly been used in biological research for a variety of tasks. Recently, they have proven to be valuable for uncovering subtle cell phenotypic differences that are not directly discernible to the human eye. However, current methods employed to achieve this goal mainly rely on Generative Adversarial Networks (GANs). While effective, GANs encompass issues such as training instability and mode collapse, and they do not accurately map images back to the model\u2019s latent space, which is necessary to synthesize, manipulate, and thus interpret outputs based on real images. In this work, we introduce PhenDiff: a multi-class conditional method leveraging Diffusion Models (DMs) designed to identify shifts in cellular phenotypes by translating a real image from one condition to another. We qualitatively and quantitatively validate this method on cases where the phenotypic changes are visible or invisible, such as in low concentrations of drug treatments. Overall, PhenDiff represents a valuable tool for identifying cellular variations in real microscopy images. 
We anticipate that it could facilitate the understanding of diseases and advance drug discovery through the identification of novel biomarkers.", "title":"PhenDiff: Revealing Subtle Phenotypes with Diffusion Models in Real Images", "authors":[ "Bourou, Anis", "Boyer, Thomas", "Gheisari, Marzieh", "Daupin, K\u00e9vin", "Dubreuil, V\u00e9ronique", "De Thonel, Aur\u00e9lie", "Mezger, Val\u00e9rie", "Genovesio, Auguste" ], "id":"Conference", "arxiv_id":"2312.08290", "GitHub":[ "https:\/\/github.com\/WarmongeringBeaver\/PhenDiff" ], "paper_page":"https:\/\/huggingface.co\/papers\/2312.08290", "n_linked_authors":0, "upvotes":1, "num_comments":0, "n_authors":7, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":543 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0223_paper.pdf", "bibtext":"@InProceedings{ Hua_Endo4DGS_MICCAI2024,\n author = { Huang, Yiming and Cui, Beilei and Bai, Long and Guo, Ziqi and Xu, Mengya and Islam, Mobarakol and Ren, Hongliang },\n title = { { Endo-4DGS: Endoscopic Monocular Scene Reconstruction with 4D Gaussian Splatting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the realm of robot-assisted minimally invasive surgery, dynamic scene reconstruction can significantly enhance downstream tasks and improve surgical outcomes. Neural Radiance Fields (NeRF)-based methods have recently risen to prominence for their exceptional ability to reconstruct scenes but are hampered by slow inference speed, prolonged training, and inconsistent depth estimation. Some previous work utilizes ground truth depth for optimization but it is hard to acquire in the surgical domain. To overcome these obstacles, we present Endo-4DGS, a real-time endoscopic dynamic reconstruction approach that utilizes 3D Gaussian Splatting (GS) for 3D representation. Specifically, we propose lightweight MLPs to capture temporal dynamics with Gaussian deformation fields. To obtain a satisfactory Gaussian Initialization, we exploit a powerful depth estimation foundation model, Depth-Anything, to generate pseudo-depth maps as a geometry prior. We additionally propose confidence-guided learning to tackle the ill-pose problems in monocular depth estimation and enhance the depth-guided reconstruction with surface normal constraints and depth regularization. Our approach has been validated on two surgical datasets, where it can effectively render in real-time, compute efficiently, and reconstruct with remarkable accuracy. 
Our code is available at https:\/\/github.com\/lastbasket\/Endo-4DGS.", "title":"Endo-4DGS: Endoscopic Monocular Scene Reconstruction with 4D Gaussian Splatting", "authors":[ "Huang, Yiming", "Cui, Beilei", "Bai, Long", "Guo, Ziqi", "Xu, Mengya", "Islam, Mobarakol", "Ren, Hongliang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/lastbasket\/Endo-4DGS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":544 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2284_paper.pdf", "bibtext":"@InProceedings{ Jud_Domain_MICCAI2024,\n author = { Judge, Arnaud and Judge, Thierry and Duchateau, Nicolas and Sandler, Roman A. and Sokol, Joseph Z. and Bernard, Olivier and Jodoin, Pierre-Marc },\n title = { { Domain Adaptation of Echocardiography Segmentation Via Reinforcement Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Performance of deep learning segmentation models is significantly challenged in its transferability across different medical imaging domains, particularly when aiming to adapt these models to a target domain with insufficient annotated data for effective fine-tuning. While existing domain adaptation (DA) methods propose strategies to alleviate this problem, these methods do not explicitly incorporate human-verified segmentation priors, compromising the potential of a model to produce anatomically plausible segmentations. We introduce RL4Seg, an innovative reinforcement learning framework that reduces the need to otherwise incorporate large expertly annotated datasets in the target domain, and eliminates the need for lengthy manual human review. Using a target dataset of 10,000 unannotated 2D echocardiographic images, RL4Seg not only outperforms existing state-of-the-art DA methods in accuracy but also achieves 99% anatomical validity on a subset of 220 expert-validated subjects from the target domain. 
Furthermore, our framework\u2019s reward network offers uncertainty estimates comparable with dedicated state-of-the-art uncertainty methods, demonstrating the utility and effectiveness of RL4Seg in overcoming DA challenges in medical image segmentation.", "title":"Domain Adaptation of Echocardiography Segmentation Via Reinforcement Learning", "authors":[ "Judge, Arnaud", "Judge, Thierry", "Duchateau, Nicolas", "Sandler, Roman A.", "Sokol, Joseph Z.", "Bernard, Olivier", "Jodoin, Pierre-Marc" ], "id":"Conference", "arxiv_id":"2406.17902", "GitHub":[ "https:\/\/github.com\/arnaudjudge\/RL4Seg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":545 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1156_paper.pdf", "bibtext":"@InProceedings{ Fen_Mining_MICCAI2024,\n author = { Feng, Siyang and Chen, Jiale and Liu, Zhenbing and Liu, Wentao and Wang, Zimin and Lan, Rushi and Pan, Xipeng },\n title = { { Mining Gold from the Sand: Weakly Supervised Histological Tissue Segmentation with Activation Relocalization and Mutual Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Class activation maps- (CAMs-) based image-level weakly supervised tissue segmentation has become a popular research topic due to the advantage of its low annotation cost. However, two challenges still exist in this task: (1) low-quality pseudo mask generation, and (2) training with noisy label supervision. To address these issues, we propose a novel weakly supervised segmentation framework with Activation Relocalization and Mutual Learning (ARML). First, we integrate an Activation Relocalization Scheme (ARS) into the classification phase to more accurately cover the useful areas in initial CAMs. Second, to deal with the inevitably noisy annotations in pseudo masks generated by ARS, we propose a noise-robust mutual learning segmentation model. The model promotes peer networks to capture different characteristics of the outputs, and two noise suppression strategies, namely samples weighted voting (SWV) and samples relation mining (SRM), are introduced to excavate the potential credible information from noisy annotations. Extensive experiments on BCSS and LUAD-HistoSeg datasets demonstrate that our proposed ARML exceeds many state-of-the-art weakly supervised semantic segmentation methods, which gives new insight into tissue segmentation tasks. 
The code is available at: https:\/\/github.com\/director87\/ARML.", "title":"Mining Gold from the Sand: Weakly Supervised Histological Tissue Segmentation with Activation Relocalization and Mutual Learning", "authors":[ "Feng, Siyang", "Chen, Jiale", "Liu, Zhenbing", "Liu, Wentao", "Wang, Zimin", "Lan, Rushi", "Pan, Xipeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/director87\/ARML" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":546 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3434_paper.pdf", "bibtext":"@InProceedings{ Osu_Towards_MICCAI2024,\n author = { Osuala, Richard and Lang, Daniel M. and Verma, Preeti and Joshi, Smriti and Tsirikoglou, Apostolia and Skorupko, Grzegorz and Kushibar, Kaisar and Garrucho, Lidia and Pinaya, Walter H. L. and Diaz, Oliver and Schnabel, Julia A. and Lekadir, Karim },\n title = { { Towards Learning Contrast Kinetics with Multi-Condition Latent Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Contrast agents in dynamic contrast enhanced magnetic resonance imaging allow to localize tumors and observe their contrast kinetics, which is essential for cancer characterization and respective treatment decision-making. However, contrast agent administration is not only associated with adverse health risks , but also restricted for patients during pregnancy, and for those with kidney malfunction, or other adverse reactions. With contrast uptake as key biomarker for lesion malignancy, cancer recurrence risk, and treatment response, it becomes pivotal to reduce the dependency on intravenous contrast agent administration. To this end, we propose a multi-conditional latent diffusion model capable of acquisition time-conditioned image synthesis of DCE-MRI temporal sequences. To evaluate medical image synthesis, we additionally propose and validate the Fr\u00e9chet radiomics distance as an image quality measure based on biomarker variability between synthetic and real imaging data. Our results demonstrate our method\u2019s ability to generate realistic multi-sequence fat-saturated breast DCE-MRI and uncover the emerging potential of deep learning based contrast kinetics simulation. We publicly share our accessible codebase at https:\/\/github.com\/RichardObi\/ccnet and provide a user-friendly library for Fr\u00e9chet radiomics distance calculation at https:\/\/pypi.org\/project\/frd-score.", "title":"Towards Learning Contrast Kinetics with Multi-Condition Latent Diffusion Models", "authors":[ "Osuala, Richard", "Lang, Daniel M.", "Verma, Preeti", "Joshi, Smriti", "Tsirikoglou, Apostolia", "Skorupko, Grzegorz", "Kushibar, Kaisar", "Garrucho, Lidia", "Pinaya, Walter H. 
L.", "Diaz, Oliver", "Schnabel, Julia A.", "Lekadir, Karim" ], "id":"Conference", "arxiv_id":"2403.13890", "GitHub":[ "https:\/\/github.com\/RichardObi\/frd-score" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":547 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2613_paper.pdf", "bibtext":"@InProceedings{ Li_Spatial_MICCAI2024,\n author = { Li, Chen and Hu, Xiaoling and Abousamra, Shahira and Xu, Meilong and Chen, Chao },\n title = { { Spatial Diffusion for Cell Layout Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Generative models, such as GANs and diffusion models, have been used to augment training sets and boost performances in different tasks. We focus on generative models for cell detection instead, i.e., locating and classifying cells in given pathology images. One important information that has been largely overlooked is the spatial patterns of the cells. In this paper, we propose a spatial-pattern-guided generative model for cell layout generation. Specifically, a novel diffusion model guided by spatial features and generates realistic cell layouts has been proposed. We explore different density models as spatial features for the diffusion model. In downstream tasks, we show that the generated cell layouts can be used to guide the generation of high-quality pathology images. Augmenting with these images can significantly boost the performance of SOTA cell detection methods. The code is available at https:\/\/github.com\/superlc1995\/Diffusion-cell.", "title":"Spatial Diffusion for Cell Layout Generation", "authors":[ "Li, Chen", "Hu, Xiaoling", "Abousamra, Shahira", "Xu, Meilong", "Chen, Chao" ], "id":"Conference", "arxiv_id":"2409.03106", "GitHub":[ "https:\/\/github.com\/superlc1995\/Diffusion-cell" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":548 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2491_paper.pdf", "bibtext":"@InProceedings{ Yan_Simplify_MICCAI2024,\n author = { Yang, Xinquan and Li, Xuguang and Luo, Xiaoling and Zeng, Leilei and Zhang, Yudi and Shen, Linlin and Deng, Yongqiang },\n title = { { Simplify Implant Depth Prediction as Video Grounding: A Texture Perceive Implant Depth Prediction Network } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Surgical guide plate is an important tool for the dental implant surgery. However, the design process heavily relies on the dentist to manually simulate the implant angle and depth. When deep neural network has been applied to assist the dentist quickly locates the implant position, most of them are not able to determine the implant depth. 
Inspired by the video grounding task, which localizes the starting and ending time of the target video segment, in this paper, we simplify implant depth prediction as video grounding and develop a Texture Perceiver Implant Depth Prediction Network (TPNet), which enables us to directly output the implant depth without complex measurements of oral bone. TPNet consists of an implant region detector (IRD) and an implant depth prediction network (IDPNet). IRD is an object detector designed to crop the candidate implant volume from the CBCT, which greatly saves computation resources. IDPNet takes the cropped CBCT data to predict the implant depth. A Texture Perceive Loss (TPL) is devised to enable the encoder of IDPNet to perceive the texture variation among slices. Extensive experiments on a large dental implant dataset demonstrated that the proposed TPNet achieves superior performance compared to existing methods.", "title":"Simplify Implant Depth Prediction as Video Grounding: A Texture Perceive Implant Depth Prediction Network", "authors":[ "Yang, Xinquan", "Li, Xuguang", "Luo, Xiaoling", "Zeng, Leilei", "Zhang, Yudi", "Shen, Linlin", "Deng, Yongqiang" ], "id":"Conference", "arxiv_id":"2406.04603", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":549 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0290_paper.pdf", "bibtext":"@InProceedings{ Gow_Masks_MICCAI2024,\n author = { Gowda, Shreyank N. and Clifton, David A. },\n title = { { Masks and Manuscripts: Advancing Medical Pre-training with End-to-End Masking and Narrative Structuring } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Contemporary medical contrastive learning faces challenges from inconsistent semantics and sample pair morphology, leading to dispersed and converging semantic shifts. The variability in text reports, due to multiple authors, complicates semantic consistency. To tackle these issues, we propose a two-step approach. Initially, text reports are converted into a standardized triplet format, laying the groundwork for our novel concept of \u201cobservations\u201d and \u201cverdicts.\u201d This approach refines the {Entity, Position, Exist} triplet into binary questions, guiding towards a clear \u201cverdict.\u201d We also innovate in visual pre-training with a Meijering-based masking, focusing on features representative of medical images\u2019 local context. By integrating this with our text conversion method, our model advances cross-modal representation in a multimodal contrastive learning framework, setting new benchmarks in medical image analysis.", "title":"Masks and Manuscripts: Advancing Medical Pre-training with End-to-End Masking and Narrative Structuring", "authors":[ "Gowda, Shreyank N.", "Clifton, David A." 
], "id":"Conference", "arxiv_id":"2407.16264", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":550 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2509_paper.pdf", "bibtext":"@InProceedings{ Nov_Ataskconditional_MICCAI2024,\n author = { Novosad, Philip and Carano, Richard A. D. and Krishnan, Anitha Priya },\n title = { { A task-conditional mixture-of-experts model for missing modality segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate quantification of multiple sclerosis (MS) lesions using multi-contrast magnetic resonance imaging (MRI) plays a crucial role in disease assessment. While many methods for automatic MS lesion segmentation in MRI are available, these methods typically require a fixed set of MRI modalities as inputs. Such full multi-contrast inputs are not always acquired, limiting their utility in practice. To address this issue, a training strategy known as modality dropout (MD) has been widely adopted in the literature. However, models trained via MD still under-perform compared to dedicated models trained for particular modality configurations. In this work, we hypothesize that the poor performance of MD is the result of an overly constrained multi-task optimization problem. To reduce harmful task interference, we propose to incorporate task-conditional mixture-of-expert layers into our segmentation model, allowing different tasks to leverage different parameters subsets. Second, we propose a novel online self-distillation loss to help regularize the model and to explicitly promote model invariance to input modality configuration. Compared to standard MD training, our method demonstrates improved results on a large proprietary clinical trial dataset as well as on a small publicly available dataset of T2 lesions.", "title":"A task-conditional mixture-of-experts model for missing modality segmentation", "authors":[ "Novosad, Philip", "Carano, Richard A. 
D.", "Krishnan, Anitha Priya" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":551 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1593_paper.pdf", "bibtext":"@InProceedings{ Gu_Unsupervised_MICCAI2024,\n author = { Gu, Mingxuan and Thies, Mareike and Mei, Siyuan and Wagner, Fabian and Fan, Mingcheng and Sun, Yipeng and Pan, Zhaoya and Vesal, Sulaiman and Kosti, Ronak and Possart, Dennis and Utz, Jonas and Maier, Andreas },\n title = { { Unsupervised Domain Adaptation using Soft-Labeled Contrastive Learning with Reversed Monte Carlo Method for Cardiac Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent unsupervised domain adaptation methods in medical image segmentation adopt centroid\/prototypical contrastive learning (CL) to match the source and target features for their excellent ability of representation learning and semantic feature alignment. Of these CL methods, most works extract features with a binary mask generated by similarity measure or thresholding the prediction. However, this hard-threshold (HT) strategy may induce sparse features and incorrect label assignments. Conversely, while the soft-labeling technique has proven effective in addressing the limitations of the HT strategy by assigning importance factors to pixel features, it remains unexplored in CL algorithms. Thus, in this work, we present a novel CL approach leveraging soft pseudo labels for category-wise target centroid generation, complemented by a reversed Monte Carlo method to achieve a more compact target feature space. Additionally, we propose a centroid norm regularizer as an extra magnitude constraint to bolster the model\u2019s robustness. 
Extensive experiments and ablation studies on two cardiac data sets underscore the effectiveness of each component and reveal a significant enhancement in segmentation results in Dice Similarity Score and Hausdorff Distance 95 compared with a wide range of state-of-the-art methods.", "title":"Unsupervised Domain Adaptation using Soft-Labeled Contrastive Learning with Reversed Monte Carlo Method for Cardiac Image Segmentation", "authors":[ "Gu, Mingxuan", "Thies, Mareike", "Mei, Siyuan", "Wagner, Fabian", "Fan, Mingcheng", "Sun, Yipeng", "Pan, Zhaoya", "Vesal, Sulaiman", "Kosti, Ronak", "Possart, Dennis", "Utz, Jonas", "Maier, Andreas" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MingxuanGu\/Soft-Labeled-Contrastive-Learning" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":552 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1045_paper.pdf", "bibtext":"@InProceedings{ Dai_Advancing_MICCAI2024,\n author = { Dai, Ling and Zhao, Kaitao and Li, Zhongyu and Zhu, Jihua and Liang, Libin },\n title = { { Advancing Sensorless Freehand 3D Ultrasound Reconstruction with a Novel Coupling Pad } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Sensorless freehand 3D ultrasound (US) reconstruction poses a significant challenge, yet it holds considerable importance in improving the accessibility of 3D US applications in clinics. Current mainstream solutions, relying on inertial measurement units or deep learning, encounter issues like cumulative drift. To overcome these limitations, we present a novel sensorless 3D US solution with two key contributions. Firstly, we develop a novel coupling pad for 3D US, which can be seamlessly integrated into the conventional 2D US scanning process. This pad, featuring 3 N-shaped lines, provides 3D spatial information without relying on external tracking devices. Secondly, we introduce a coarse-to-fine optimization method for calculating poses of sequential 2D US images. The optimization begins with a rough estimation of poses and undergoes refinement using a distance-topology discrepancy reduction strategy. The proposed method is validated by both simulation and practical phantom studies, demonstrating its superior performance compared to state-of-the-art methods and good accuracy in 3D US reconstruction.", "title":"Advancing Sensorless Freehand 3D Ultrasound Reconstruction with a Novel Coupling Pad", "authors":[ "Dai, Ling", "Zhao, Kaitao", "Li, Zhongyu", "Zhu, Jihua", "Liang, Libin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":553 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1914_paper.pdf", "bibtext":"@InProceedings{ Eic_PhysicsInformed_MICCAI2024,\n author = { Eichhorn, Hannah and Spieker, Veronika and Hammernik, Kerstin and Saks, Elisa and Weiss, Kilian and Preibisch, Christine and Schnabel, Julia A. 
},\n title = { { Physics-Informed Deep Learning for Motion-Corrected Reconstruction of Quantitative Brain MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"We propose PHIMO, a physics-informed learning-based motion correction method tailored to quantitative MRI. PHIMO leverages information from the signal evolution to exclude motion-corrupted k-space lines from a data-consistent reconstruction. We demonstrate the potential of PHIMO for the application of T2* quantification from gradient echo MRI, which is particularly sensitive to motion due to its sensitivity to magnetic field inhomogeneities. A state-of-the-art technique for motion correction requires redundant acquisition of the k-space center, prolonging the acquisition.\nWe show that PHIMO can detect and exclude intra-scan motion events and, thus, correct for severe motion artifacts. PHIMO approaches the performance of the state-of-the-art motion correction method, while substantially reducing the acquisition time by over 40%, facilitating clinical applicability. Our code is available at https:\/\/github.com\/compai-lab\/2024-miccai-eichhorn.", "title":"Physics-Informed Deep Learning for Motion-Corrected Reconstruction of Quantitative Brain MRI", "authors":[ "Eichhorn, Hannah", "Spieker, Veronika", "Hammernik, Kerstin", "Saks, Elisa", "Weiss, Kilian", "Preibisch, Christine", "Schnabel, Julia A." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/compai-lab\/2024-miccai-eichhorn" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":554 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2762_paper.pdf", "bibtext":"@InProceedings{ Wan_SIXNet_MICCAI2024,\n author = { Wang, Xinyi and Xu, Zikang and Zhu, Heqin and Yao, Qingsong and Sun, Yiyong and Zhou, S. Kevin },\n title = { { SIX-Net: Spatial-context Information miX-up for Electrode Landmark Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Catheter ablation is a prevalent procedure for treating atrial fibrillation, primarily utilizing catheters equipped with electrodes to gather electrophysiological signals. However, the localization of catheters in fluoroscopy images presents a challenge for clinicians due to the complexity of the intervention processes. In this paper, we propose SIX-Net, a novel algorithm intending to localize landmarks of electrodes in fluoroscopy images precisely, by mixing up spatial-context information from three aspects:\nFirst, we propose a new network architecture specially designed for global-local spatial feature aggregation; Then, we mix up spatial correlations between segmentation and landmark detection, by sequential connections between the two tasks with the help of the Segment Anything Model; Finally, a weighted loss function is carefully designed considering the relative spatial-arrangement information among electrodes in the same image. 
\nExperiment results on the test set and two clinical-challenging subsets reveal that our method outperforms several state-of-the-art landmark detection methods (~50% improvement for RF and ~25% improvement for CS).", "title":"SIX-Net: Spatial-context Information miX-up for Electrode Landmark Detection", "authors":[ "Wang, Xinyi", "Xu, Zikang", "Zhu, Heqin", "Yao, Qingsong", "Sun, Yiyong", "Zhou, S. Kevin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":555 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1678_paper.pdf", "bibtext":"@InProceedings{ Li_MoCoDiff_MICCAI2024,\n author = { Li, Feng and Zhou, Zijian and Fang, Yu and Cai, Jiangdong and Wang, Qian },\n title = { { MoCo-Diff: Adaptive Conditional Prior on Diffusion Network for MRI Motion Correction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Magnetic Resonance Image (MRI) is a powerful medical imaging modality with non-ionizing radiation. However, due to its long scanning time, patient movement is prone to occur during acquisition. Severe motions can significantly degrade the image quality and make the images non-diagnostic. This paper introduces MoCo-Diff, a novel two-stage deep learning framework designed to correct the motion artifacts in 3D MRI volumes. In the first stage, we exploit a novel attention mechanism using shift window-based transformers in both the in-slice and through-slice directions to effectively remove the motion artifacts. In the second stage, the initially-corrected image serves as the prior for realistic MR image restoration. This stage incorporates the pre-trained Stable Diffusion to leverage its robust generative capability and the ControlUNet to fine-tune the diffusion model with the assistance of the prior. Moreover, we introduce an uncertainty predictor to assess the reliability of the motion-corrected images, which not only visually hints the motion correction errors but also enhances motion correction quality by trimming the prior with dynamic weights. Our experiments illustrate MoCo-Diff\u2019s superiority over state-of-the-art approaches in removing motion artifacts and retaining anatomical details across different levels of motion severity. 
The code is available at https:\/\/github.com\/fengza\/MoCo-Diff.", "title":"MoCo-Diff: Adaptive Conditional Prior on Diffusion Network for MRI Motion Correction", "authors":[ "Li, Feng", "Zhou, Zijian", "Fang, Yu", "Cai, Jiangdong", "Wang, Qian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/fengza\/MoCo-Diff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":556 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2540_paper.pdf", "bibtext":"@InProceedings{ Zha_Mixed_MICCAI2024,\n author = { Zhang, Si-Miao and Wang, Jing and Wang, Yi-Xuan and Liu, Tao and Zhu, Haogang and Zhang, Han and Cheng, Jian },\n title = { { Mixed Integer Linear Programming for Discrete Sampling Scheme Design in Diffusion MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"In diffusion MRI (dMRI), a uniform single or multiple shell sampling scheme is typically required for data acquisition in q-space, because uniform spherical sampling offers the advantage of capturing more information using fewer samples, leading to superior reconstruction results. Uniform sampling problems can be categorized into continuous and discrete types. While most existing sampling methods focus on the continuous problem that is to design spherical samples continuously from single or multiple shells, this paper primarily investigates two discrete optimization problems, i.e., 1) optimizing the polarity of an existing scheme (P-P), and 2) optimizing the ordering of an existing scheme (P-O). Existing approaches for these two problems mainly rely on greedy algorithms, simulated annealing, and exhaustive search, which fail to obtain global optima within a reasonable timeframe. We propose several Mixed Integer Linear Programming (MILP) based methods to address these problems. To the best of our knowledge, this is the first work that solves these two discrete problems using MILP to obtain global optimal or sufficiently good solutions in 10 minutes. Experiments performed on single and multiple shells demonstrate that our MILP methods can achieve larger separation angles and lower electrostatic energy, resulting better reconstruction results, compared with existing approaches in commonly used software (i.e., CAMINO and MRtrix).", "title":"Mixed Integer Linear Programming for Discrete Sampling Scheme Design in Diffusion MRI", "authors":[ "Zhang, Si-Miao", "Wang, Jing", "Wang, Yi-Xuan", "Liu, Tao", "Zhu, Haogang", "Zhang, Han", "Cheng, Jian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":557 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3581_paper.pdf", "bibtext":"@InProceedings{ Feh_Intraoperative_MICCAI2024,\n author = { Fehrentz, Maximilian and Azampour, Mohammad Farid and Dorent, Reuben and Rasheed, Hassan and Galvin, Colin and Golby, Alexandra and Wells III, William M. 
and Frisken, Sarah and Navab, Nassir and Haouchine, Nazim },\n title = { { Intraoperative Registration by Cross-Modal Inverse Neural Rendering } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present in this paper a novel approach for 3D\/2D intraoperative registration during neurosurgery via cross-modal inverse neural rendering. Our approach separates implicit neural representation into two components, handling anatomical structure preoperatively and appearance intraoperatively. This disentanglement is achieved by controlling a Neural Radiance Field\u2019s appearance with a multi-style hypernetwork. Once trained, the implicit neural representation serves as a differentiable rendering engine, which can be used to estimate the surgical camera pose by minimizing the dissimilarity between its rendered images and the target intraoperative image. We tested our method on retrospective patients\u2019 data from clinical cases, showing that our method outperforms state-of-the-art while meeting current clinical standards for registration.", "title":"Intraoperative Registration by Cross-Modal Inverse Neural Rendering", "authors":[ "Fehrentz, Maximilian", "Azampour, Mohammad Farid", "Dorent, Reuben", "Rasheed, Hassan", "Galvin, Colin", "Golby, Alexandra", "Wells III, William M.", "Frisken, Sarah", "Navab, Nassir", "Haouchine, Nazim" ], "id":"Conference", "arxiv_id":"2409.11983", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":558 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0410_paper.pdf", "bibtext":"@InProceedings{ Rof_Feature_MICCAI2024,\n author = { Roffo, Giorgio and Biffi, Carlo and Salvagnini, Pietro and Cherubini, Andrea },\n title = { { Feature Selection Gates with Gradient Routing for Endoscopic Image Computing } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"To address overfitting and enhance model generalization in gastroenterological polyp size assessment, our study introduces Feature Selection Gates (FSG) alongside Gradient Routing (GR) for dynamic feature selection. This technique aims to boost Convolutional Neural Networks (CNNs) and Vision Transformers (ViTs) by promoting sparse connectivity, thereby reducing overfitting and enhancing generalization. FSG achieves this through sparsification with learnable weights, serving as a regularization strategy. GR further refines this process by optimizing FSG parameters via dual forward passes, independently from the main model, to improve feature re-weighting. Our evaluation spanned multiple datasets, including CIFAR-100 for a broad impact assessment and specialized endoscopic datasets (REAL-Colon [12], Misawa [9], and SUN [13]) focusing on polyp size estimation, covering over 200 polyps in more than 370K frames. The findings indicate that our FSG-enhanced networks substantially enhance performance in both binary and triclass classification tasks related to polyp sizing. 
Specifically, CNNs experienced an F1 Score improvement to 87.8% in binary classification, while in triclass classification, the ViT-T model reached an F1 Score of 76.5%, outperforming traditional CNNs and ViT-T models. To facilitate further research, we are releasing our codebase, which includes implementations for CNNs, multistream CNNs, ViT, and FSG-augmented variants. This resource aims to standardize the use of endoscopic datasets, providing public training-validation-testing splits for reliable and comparable research in gastroenterological polyp size estimation. The codebase is available at github.com\/cosmoimd\/feature-selection-gates.", "title":"Feature Selection Gates with Gradient Routing for Endoscopic Image Computing", "authors":[ "Roffo, Giorgio", "Biffi, Carlo", "Salvagnini, Pietro", "Cherubini, Andrea" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/cosmoimd\/feature-selection-gates" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":559 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0391_paper.pdf", "bibtext":"@InProceedings{ Pou_CARMFL_MICCAI2024,\n author = { Poudel, Pranav and Shrestha, Prashant and Amgain, Sanskar and Shrestha, Yash Raj and Gyawali, Prashnna and Bhattarai, Binod },\n title = { { CAR-MFL: Cross-Modal Augmentation by Retrieval for Multimodal Federated Learning with Missing Modalities } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multimodal AI has demonstrated superior performance over unimodal approaches by leveraging diverse data sources for more comprehensive analysis. However, applying this effectiveness in healthcare is challenging due to the limited availability of public datasets. Federated learning presents an exciting solution, allowing the use of extensive databases from hospitals and health centers without centralizing sensitive data, thus maintaining privacy and security. Yet, research in multimodal federated learning, particularly in scenarios with missing modalities\u2014a common issue in healthcare datasets\u2014remains scarce, highlighting a critical area for future exploration. Toward this, we propose a novel method for multimodal federated learning with missing modalities. Our contribution lies in a novel cross-modal data augmentation by retrieval, leveraging the small publicly available dataset to fill the missing modalities in the clients. 
Our method learns the parameters in a federated manner, ensuring privacy protection and improving performance in multiple challenging multimodal benchmarks in the medical domain, surpassing several competitive baselines.", "title":"CAR-MFL: Cross-Modal Augmentation by Retrieval for Multimodal Federated Learning with Missing Modalities", "authors":[ "Poudel, Pranav", "Shrestha, Prashant", "Amgain, Sanskar", "Shrestha, Yash Raj", "Gyawali, Prashnna", "Bhattarai, Binod" ], "id":"Conference", "arxiv_id":"2407.08648", "GitHub":[ "https:\/\/github.com\/bhattarailab\/CAR-MFL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":560 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3820_paper.pdf", "bibtext":"@InProceedings{ Kar_An_MICCAI2024,\n author = { Karimi, Davood },\n title = { { An approach to building foundation models for brain image analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Existing machine learning methods for brain image analysis are mostly based on supervised training. They require large labeled datasets, which can be costly or impossible to obtain. Moreover, the trained models are useful only for the narrow task defined by the labels. In this work, we developed a new method, based on the concept of foundation models, to overcome these limitations. Our model is an attention-based neural network that is trained using a novel self-supervised approach. Specifically, the model is trained to generate brain images in a patch-wise manner, thereby learning the brain structure. To facilitate learning of image details, we propose a new method that encodes high-frequency information using convolutional kernels with random weights. We trained our model on a pool of 10 public datasets. We then applied the model on five independent datasets to perform segmentation, lesion detection, denoising, and brain age estimation. Results showed that the foundation model achieved competitive or better results on all tasks, while significantly reducing the required amount of labeled training data. Our method enables leveraging large unlabeled neuroimaging datasets to effectively address diverse brain image analysis tasks and reduce the time and cost requirements of acquiring labels.", "title":"An approach to building foundation models for brain image analysis", "authors":[ "Karimi, Davood" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":561 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0298_paper.pdf", "bibtext":"@InProceedings{ Yeh_Insight_MICCAI2024,\n author = { Yeh, Chun-Hsiao and Wang, Jiayun and Graham, Andrew D. and Liu, Andrea J. and Tan, Bo and Chen, Yubei and Ma, Yi and Lin, Meng C. 
},\n title = { { Insight: A Multi-Modal Diagnostic Pipeline using LLMs for Ocular Surface Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate diagnosis of ocular surface diseases is critical in optometry and ophthalmology, which hinge on integrating clinical data sources (e.g., meibography imaging and clinical metadata). Traditional human assessments lack precision in quantifying clinical observations, while current machine-based methods often treat diagnoses as multi-class classification problems, limiting the diagnoses to a prede\ufb01ned closed-set of curated answers without reasoning the clinical relevance of each variable to the diagnosis. To tackle these challenges, we introduce an innovative multi-modal diagnostic pipeline (MDPipe) by employing large language models (LLMs) for ocular surface disease diagnosis. \nWe first employ a visual translator to interpret meibography images by converting them into quantifiable morphology data, facilitating their integration with clinical metadata and enabling the communication of nuanced medical insight to LLMs. To further advance this communication, we introduce a LLM-based summarizer to contextualize the insight from the combined morphology and clinical metadata, and generate clinical report summaries. Finally, we refine the LLMs\u2019 reasoning ability with domain-specific insight from real-life clinician diagnoses. Our evaluation across diverse ocular surface disease diagnosis benchmarks demonstrates that MDPipe outperforms existing standards, including GPT-4, and provides clinically sound rationales for diagnoses. The project is available at \\url{https:\/\/danielchyeh.github.io\/MDPipe\/}.", "title":"Insight: A Multi-Modal Diagnostic Pipeline using LLMs for Ocular Surface Disease Diagnosis", "authors":[ "Yeh, Chun-Hsiao", "Wang, Jiayun", "Graham, Andrew D.", "Liu, Andrea J.", "Tan, Bo", "Chen, Yubei", "Ma, Yi", "Lin, Meng C." ], "id":"Conference", "arxiv_id":"2410.00292", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":562 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2548_paper.pdf", "bibtext":"@InProceedings{ Zhu_Efficient_MICCAI2024,\n author = { Zhu, Yuanzhuo and Lian, Chunfeng and Li, Xianjun and Wang, Fan and Ma, Jianhua },\n title = { { Efficient Cortical Surface Parcellation via Full-Band Diffusion Learning at Individual Space } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cortical parcellation delineates the cerebral cortex into distinct regions based on anatomical and\/or functional criteria, a process crucial for neuroscientific research and clinical applications. Conventional methods for cortical parcellation involve spherical mapping and complex feature computation, which are time consuming and prone to error. Recent geometric learning approaches offer some improvements but may still depend on spherical mapping and could be sensitive to mesh variations. 
In this work, we present Cortex-Diffusion, a fully automatic framework for cortical parcellation on native cortical surfaces without spherical mapping or morphological feature extraction. Leveraging the DiffusionNet as its backbone, Cortex-Diffusion integrates a newly designed module for full-band spectral-accelerated spatial diffusion learning to adaptively aggregate information across highly convoluted meshes, allowing high-resolution geometric representation and accurate vertex-wise delineation. Using only raw 3D vertex coordinates, the model is compact, with merely 0.49 MB of learnable parameters. Extensive experiments on adult and infant datasets demonstrate that Cortex-Diffusion achieves superior accuracy and robustness in cortical parcellation.", "title":"Efficient Cortical Surface Parcellation via Full-Band Diffusion Learning at Individual Space", "authors":[ "Zhu, Yuanzhuo", "Lian, Chunfeng", "Li, Xianjun", "Wang, Fan", "Ma, Jianhua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":563 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3351_paper.pdf", "bibtext":"@InProceedings{ Wan_LIBR_MICCAI2024,\n author = { Wang, Dingrong and Azadvar, Soheil and Heiselman, Jon and Jiang, Xiajun and Miga, Michael and Wang, Linwei },\n title = { { LIBR+: Improving Intraoperative Liver Registration by Learning the Residual of Biomechanics-Based Deformable Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"The surgical environment imposes unique challenges on the intraoperative registration of organ shapes to their preoperatively-imaged geometry. Biomechanical model-based registration remains popular, while deep learning solutions remain limited due to the sparsity and variability of intraoperative measurements and the limited ground-truth deformation of an organ that can be obtained during the surgery. In this paper, we propose a novel hybrid registration approach that leverages a linearized iterative boundary reconstruction (LIBR) method based on linear elastic biomechanics and uses deep neural networks to learn its residual to the ground-truth deformation (LIBR+). 
We further formulate a dual-branch spline-residual graph convolutional neural network (SR-GCN) to assimilate information from sparse and variable intraoperative measurements and effectively propagate it through the geometry of the 3D organ. Experiments on a large intraoperative liver registration dataset demonstrated the consistent improvements achieved by LIBR+ in comparison to existing rigid, biomechanical model-based non-rigid, and deep-learning-based non-rigid approaches to intraoperative liver registration.", "title":"LIBR+: Improving Intraoperative Liver Registration by Learning the Residual of Biomechanics-Based Deformable Registration", "authors":[ "Wang, Dingrong", "Azadvar, Soheil", "Heiselman, Jon", "Jiang, Xiajun", "Miga, Michael", "Wang, Linwei" ], "id":"Conference", "arxiv_id":"2403.06901", "GitHub":[ "https:\/\/github.com\/wdr123\/splineCNN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":564 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1214_paper.pdf", "bibtext":"@InProceedings{ Ily_AHybrid_MICCAI2024,\n author = { Ilyas, Zaid and Saleem, Afsah and Suter, David and Schousboe, John T. and Leslie, William D. and Lewis, Joshua R. and Gilani, Syed Zulqarnain },\n title = { { A Hybrid CNN-Transformer Feature Pyramid Network for Granular Abdominal Aortic Calcification Detection from DXA Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cardiovascular Diseases (CVDs) stand as the primary global cause of mortality, with Abdominal Aortic Calcification (AAC) being a stable marker of these conditions. AAC can be observed in Dual Energy X-ray absorptiometry (DXA) lateral view Vertebral Fracture Assessment (VFA) scans, usually performed for the detection of vertebral fractures. Early detection of AAC can help reduce the risk of developing clinical CVD by encouraging preventive measures. Recent efforts to automate DXA VFA image analysis for AAC detection are restricted to either predicting an overall AAC score, or they lack performance in granular AAC score prediction. The latter is important in helping clinicians predict CVD associated with the diminished Windkessel effect in the aorta. In this regard, we propose a hybrid Feature Pyramid Network (FPN) based CNN-Transformer architecture (Hybrid-FPN-AACNet) that employs a novel Dual Resolution Self-Attention (DRSA) mechanism to enhance context for self-attention by working on two different resolutions of the input feature map. Moreover, the proposed architecture also employs a novel Efficient Feature Fusion Module (EFFM) that efficiently combines the features from different hierarchies of Hybrid-FPN-AACNet for regression tasks. 
The proposed architecture has achieved State-Of-The-Art (SOTA) performance at a granular level compared to previous work.", "title":"A Hybrid CNN-Transformer Feature Pyramid Network for Granular Abdominal Aortic Calcification Detection from DXA Images", "authors":[ "Ilyas, Zaid", "Saleem, Afsah", "Suter, David", "Schousboe, John T.", "Leslie, William D.", "Lewis, Joshua R.", "Gilani, Syed Zulqarnain" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zaidilyas89\/Hybrid-FPN-AACNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":565 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3846_paper.pdf", "bibtext":"@InProceedings{ Bis_Adaptive_MICCAI2024,\n author = { Biswas, Koushik and Jha, Debesh and Tomar, Nikhil Kumar and Karri, Meghana and Reza, Amit and Durak, Gorkem and Medetalibeyoglu, Alpay and Antalek, Matthew and Velichko, Yury and Ladner, Daniela and Borhani, Amir and Bagci, Ulas },\n title = { { Adaptive Smooth Activation Function for Improved Organ Segmentation and Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"The design of activation functions constitutes a cornerstone for deep learning (DL) applications, exerting a profound influence on the performance and capabilities of neural networks. This influence stems from their ability to introduce non-linearity into the network architecture. By doing so, activation functions empower the network to learn and model intricate data patterns and relationships, surpassing the limitations of linear models. In this study, we propose a new activation function, called {Adaptive Smooth Activation Unit \\textit{(\\textbf{ASAU})}}, tailored for optimized gradient propagation, thereby enhancing the proficiency of deep networks in medical image analysis. We apply this new activation function to two important and commonly used general tasks in medical image analysis: automatic disease diagnosis and organ segmentation in CT and MRI scans. Our rigorous evaluation on the \\textit{RadImageNet} abdominal\/pelvis (CT and MRI) demonstrates that our ASAU-integrated classification frameworks achieve a substantial improvement of 4.80\\% over ReLU based frameworks in classification accuracy for disease detection. Also, the proposed framework on Liver Tumor Segmentation (LiTS) 2017 Benchmarks obtains 1\\%-to-3\\% improvement in dice coefficient compared to widely used activations for segmentation tasks. The superior performance and adaptability of ASAU highlight its potential for integration into a wide range of image classification and segmentation tasks. 
The code is available at \\href{https:\/\/github.com\/koushik313\/ASAU}{https:\/\/github.com\/koushik313\/ASAU}.", "title":"Adaptive Smooth Activation Function for Improved Organ Segmentation and Disease Diagnosis", "authors":[ "Biswas, Koushik", "Jha, Debesh", "Tomar, Nikhil Kumar", "Karri, Meghana", "Reza, Amit", "Durak, Gorkem", "Medetalibeyoglu, Alpay", "Antalek, Matthew", "Velichko, Yury", "Ladner, Daniela", "Borhani, Amir", "Bagci, Ulas" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/koushik313\/ASAU" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":566 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0073_paper.pdf", "bibtext":"@InProceedings{ Jia_Towards_MICCAI2024,\n author = { Jiang, Yuncheng and Hu, Yiwen and Zhang, Zixun and Wei, Jun and Feng, Chun-Mei and Tang, Xuemei and Wan, Xiang and Liu, Yong and Cui, Shuguang and Li, Zhen },\n title = { { Towards a Benchmark for Colorectal Cancer Segmentation in Endorectal Ultrasound Videos: Dataset and Model Development } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Endorectal ultrasound (ERUS) is an important imaging modality that provides high reliability for diagnosing the depth and boundary of invasion in colorectal cancer. However, the lack of a large-scale ERUS dataset with high-quality annotations hinders the development of automatic ultrasound diagnostics. In this paper, we collected and annotated the first benchmark dataset that covers diverse ERUS scenarios, \\textit{i.e.} colorectal cancer segmentation, detection, and infiltration depth staging. Our ERUS-10K dataset comprises 77 videos and 10,000 high-resolution annotated frames. Based on this dataset, we further introduce a benchmark model for colorectal cancer segmentation, named the \\textbf{A}daptive \\textbf{S}parse-context \\textbf{TR}ansformer (\\textbf{ASTR}). ASTR is designed based on three considerations: scanning mode discrepancy, temporal information, and low computational complexity. For generalizing to different scanning modes, the adaptive scanning-mode augmentation is proposed to convert between raw sector images and linear scan ones. For mining temporal information, the sparse-context transformer is incorporated to integrate inter-frame local and global features. For reducing computational complexity, the sparse-context block is introduced to extract contextual features from auxiliary frames. 
Finally, on the benchmark dataset, the proposed ASTR model achieves a $77.6\\%$ Dice score in rectal cancer segmentation, largely outperforming previous state-of-the-art methods.", "title":"Towards a Benchmark for Colorectal Cancer Segmentation in Endorectal Ultrasound Videos: Dataset and Model Development", "authors":[ "Jiang, Yuncheng", "Hu, Yiwen", "Zhang, Zixun", "Wei, Jun", "Feng, Chun-Mei", "Tang, Xuemei", "Wan, Xiang", "Liu, Yong", "Cui, Shuguang", "Li, Zhen" ], "id":"Conference", "arxiv_id":"2408.10067", "GitHub":[ "" ], "paper_page":"https:\/\/huggingface.co\/papers\/2408.10067", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":10, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":567 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1723_paper.pdf", "bibtext":"@InProceedings{ Chu_RetMIL_MICCAI2024,\n author = { Chu, Hongbo and Sun, Qiehe and Li, Jiawen and Chen, Yuxuan and Zhang, Lizhong and Guan, Tian and Han, Anjia and He, Yonghong },\n title = { { RetMIL: Retentive Multiple Instance Learning for Histopathological Whole Slide Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Histopathological whole slide image (WSI) analysis using deep learning has become a research focus in computational pathology. The current basic paradigm is the multiple instance learning (MIL) method, which uses a WSI as a bag and the cropped patches as instances. As Transformer has become the mainstream framework of neural networks, many MIL methods based on Transformer have been widely studied. They regard the patches as a sequence to complete tasks based on sequence analysis. However, the long sequence brought by the high heterogeneity and gigapixel nature of WSI will bring challenges to Transformer-based MIL, such as high memory consumption, low inference speed, and even low inference performance. To this end, we propose a hierarchical retentive-based MIL method called RetMIL, which is adopted at local and global levels. At the local level, patches are divided into multiple subsequences, and each subsequence is updated through a parallel linear retention mechanism and aggregated by each patch embedding. At the global level, a slide-level subsequence is obtained by a serial retention mechanism and attention pooling. Finally, a fully connected layer is used to predict the category score. 
We conduct experiments on two public datasets (CAMELYON and BRACS) and an internal TCGASYS-LUNG dataset, confirming that RetMIL not only achieves state-of-the-art performance but also significantly reduces computational overhead.", "title":"RetMIL: Retentive Multiple Instance Learning for Histopathological Whole Slide Image Classification", "authors":[ "Chu, Hongbo", "Sun, Qiehe", "Li, Jiawen", "Chen, Yuxuan", "Zhang, Lizhong", "Guan, Tian", "Han, Anjia", "He, Yonghong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Hongbo-Chu\/RetMIL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":568 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1280_paper.pdf", "bibtext":"@InProceedings{ Jia_Multimodal_MICCAI2024,\n author = { Jiang, Songhan and Gan, Zhengyu and Cai, Linghan and Wang, Yifeng and Zhang, Yongbing },\n title = { { Multimodal Cross-Task Interaction for Survival Analysis in Whole Slide Pathological Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Survival prediction, utilizing pathological images and genomic profiles, is increasingly important in cancer analysis and prognosis. Despite significant progress, precise survival analysis still faces two main challenges: \n(1) The massive number of pixels contained in whole slide images (WSIs) complicates the processing of pathological images, making it difficult to generate an effective representation of the tumor microenvironment (TME).\n(2) Existing multimodal methods often rely on alignment strategies to integrate complementary information, which may lead to information loss due to the inherent heterogeneity between pathology and genes. \nIn this paper, we propose a Multimodal Cross-Task Interaction (MCTI) framework to explore the intrinsic correlations between subtype classification and survival analysis tasks. Specifically, to capture TME-related features in WSIs, we leverage the subtype classification task to mine tumor regions. Simultaneously, multi-head attention mechanisms are applied in genomic feature extraction, adaptively performing gene grouping to obtain task-related genomic embeddings. With the joint representation of pathological images and genomic data, we further introduce a Transport-Guided Attention (TGA) module that uses optimal transport theory to model the correlation between subtype classification and survival analysis tasks, effectively transferring potential information. 
Extensive experiments demonstrate the superiority of our approaches, with MCTI outperforming state-of-the-art frameworks on three public benchmarks.", "title":"Multimodal Cross-Task Interaction for Survival Analysis in Whole Slide Pathological Images", "authors":[ "Jiang, Songhan", "Gan, Zhengyu", "Cai, Linghan", "Wang, Yifeng", "Zhang, Yongbing" ], "id":"Conference", "arxiv_id":"2406.17225", "GitHub":[ "https:\/\/github.com\/jsh0792\/MCTI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":569 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0584_paper.pdf", "bibtext":"@InProceedings{ Kon_AnatomicallyControllable_MICCAI2024,\n author = { Konz, Nicholas and Chen, Yuwen and Dong, Haoyu and Mazurowski, Maciej A. },\n title = { { Anatomically-Controllable Medical Image Generation with Segmentation-Guided Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion models have enabled remarkably high-quality medical image generation, yet it is challenging to enforce anatomical constraints in generated images. To this end, we propose a diffusion model-based method that supports anatomically-controllable medical image generation, by following a multi-class anatomical segmentation mask at each sampling step. We additionally introduce a random mask ablation training algorithm to enable conditioning on a selected combination of anatomical constraints while allowing flexibility in other anatomical areas. We compare our method (\u201cSegGuidedDiff\u201d) to existing methods on breast MRI and abdominal\/neck-to-pelvis CT datasets with a wide range of anatomical objects. Results show that our method reaches a new state-of-the-art in the faithfulness of generated images to input anatomical masks on both datasets, and is on par for general anatomical realism. Finally, our model also enjoys the extra benefit of being able to adjust the anatomical similarity of generated images to real images of choice through interpolation in its latent space. SegGuidedDiff has many applications, including cross-modality translation, and the generation of paired or counterfactual data. Our code is available at https:\/\/github.com\/mazurowski-lab\/segmentation-guided-diffusion.", "title":"Anatomically-Controllable Medical Image Generation with Segmentation-Guided Diffusion Models", "authors":[ "Konz, Nicholas", "Chen, Yuwen", "Dong, Haoyu", "Mazurowski, Maciej A." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mazurowski-lab\/segmentation-guided-diffusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":570 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2030_paper.pdf", "bibtext":"@InProceedings{ Wan_Advancing_MICCAI2024,\n author = { Wang, Hongqiu and Luo, Xiangde and Chen, Wu and Tang, Qingqing and Xin, Mei and Wang, Qiong and Zhu, Lei },\n title = { { Advancing UWF-SLO Vessel Segmentation with Source-Free Active Domain Adaptation and a Novel Multi-Center Dataset } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate vessel segmentation in Ultra-Wide-Field Scanning Laser Ophthalmoscopy (UWF-SLO) images is crucial for diagnosing retinal diseases. Although recent techniques have shown encouraging outcomes in vessel segmentation, models trained on one medical dataset often underperform on others due to domain shifts. Meanwhile, manually labeling high-resolution UWF-SLO images is an extremely challenging, time-consuming and expensive task. In response, this study introduces a pioneering framework that leverages a patch-based active domain adaptation approach. By actively recommending a few valuable image patches by the devised Cascade Uncertainty-Predominance (CUP) selection strategy for labeling and model-finetuning, our method significantly improves the accuracy of UWF-SLO vessel segmentation across diverse medical centers. In addition, we annotate and construct the first Multi-center UWF-SLO Vessel Segmentation (MU-VS) dataset to promote this topic research, comprising data from multiple institutions. This dataset serves as a valuable resource for cross-center evaluation, verifying the effectiveness and robustness of our approach. Experimental results demonstrate that our approach surpasses existing domain adaptation and active learning methods, considerably reducing the gap between the Upper and Lower bounds with minimal annotations, highlighting our method\u2019s practical clinical value. 
We will release our dataset and code to facilitate relevant research (https:\/\/github.com\/whq-xxh\/SFADA-UWF-SLO).", "title":"Advancing UWF-SLO Vessel Segmentation with Source-Free Active Domain Adaptation and a Novel Multi-Center Dataset", "authors":[ "Wang, Hongqiu", "Luo, Xiangde", "Chen, Wu", "Tang, Qingqing", "Xin, Mei", "Wang, Qiong", "Zhu, Lei" ], "id":"Conference", "arxiv_id":"2406.13645", "GitHub":[ "https:\/\/github.com\/whq-xxh\/SFADA-UWF-SLO" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":571 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3051_paper.pdf", "bibtext":"@InProceedings{ Lam_Robust_MICCAI2024,\n author = { Lambert, Benjamin and Forbes, Florence and Doyle, Senan and Dojat, Michel },\n title = { { Robust Conformal Volume Estimation in 3D Medical Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Volumetry is one of the principal downstream applications of 3D medical image segmentation, for example, to detect abnormal tissue growth or for surgery planning. Conformal Prediction is a promising framework for uncertainty quantification, providing calibrated predictive intervals associated with automatic volume measurements. However, this methodology is based on the hypothesis that calibration and test samples are exchangeable, an assumption that is in practice often violated in medical image applications. A weighted formulation of Conformal Prediction can be framed to mitigate this issue, but its empirical investigation in the medical domain is still lacking. A potential reason is that it relies on the estimation of the density ratio between the calibration and test distributions, which is likely to be intractable in scenarios involving high-dimensional data. To circumvent this, we propose an efficient approach for density ratio estimation relying on the compressed latent representations generated by the segmentation model. 
Our experiments demonstrate the efficiency of our approach to reduce the coverage error in the presence of covariate shifts, in both synthetic and real-world settings.", "title":"Robust Conformal Volume Estimation in 3D Medical Images", "authors":[ "Lambert, Benjamin", "Forbes, Florence", "Doyle, Senan", "Dojat, Michel" ], "id":"Conference", "arxiv_id":"2407.19938", "GitHub":[ "https:\/\/github.com\/benolmbrt\/wcp_miccai" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":572 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1502_paper.pdf", "bibtext":"@InProceedings{ Beh_Leveraging_MICCAI2024,\n author = { Behrendt, Finn and Bhattacharya, Debayan and Mieling, Robin and Maack, Lennart and Kr\u00fcger, Julia and Opfer, Roland and Schlaefer, Alexander },\n title = { { Leveraging the Mahalanobis Distance to enhance Unsupervised Brain MRI Anomaly Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Unsupervised Anomaly Detection (UAD) methods rely on healthy data distributions to identify anomalies as outliers. In brain MRI, a common approach is reconstruction-based UAD, where generative models reconstruct healthy brain MRIs, and anomalies are detected as deviations between input and reconstruction. However, this method is sensitive to imperfect reconstructions, leading to false positives that impede the segmentation. To address this limitation, we construct multiple reconstructions with probabilistic diffusion models. We then analyze the resulting distribution of these reconstructions using the Mahalanobis distance (MHD) to identify anomalies as outliers. By leveraging information about normal variations and covariance of individual pixels within this distribution, we effectively refine anomaly scoring, leading to improved segmentation. \nOur experimental results demonstrate substantial performance improvements across various data sets. Specifically, compared to relying solely on single reconstructions, our approach achieves relative improvements of 15.9%, 35.4%, 48.0%, and 4.7% in terms of AUPRC for the BRATS21, ATLAS, MSLUB and WMH data sets, respectively.", "title":"Leveraging the Mahalanobis Distance to enhance Unsupervised Brain MRI Anomaly Detection", "authors":[ "Behrendt, Finn", "Bhattacharya, Debayan", "Mieling, Robin", "Maack, Lennart", "Kr\u00fcger, Julia", "Opfer, Roland", "Schlaefer, Alexander" ], "id":"Conference", "arxiv_id":"2407.12474", "GitHub":[ "https:\/\/github.com\/FinnBehrendt\/Mahalanobis-Unsupervised-Anomaly-Detection" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":573 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2052_paper.pdf", "bibtext":"@InProceedings{ Du_The_MICCAI2024,\n author = { Du, Yuning and Dharmakumar, Rohan and Tsaftaris, Sotirios A. 
},\n title = { { The MRI Scanner as a Diagnostic: Image-less Active Sampling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Despite the high diagnostic accuracy of Magnetic Resonance Imaging (MRI), using MRI as a Point-of-Care (POC) disease identification tool poses significant accessibility challenges due to the use of high magnetic field strength and lengthy acquisition times.\nWe ask a simple question: Can we dynamically optimise acquired samples, at the patient level, according to an (automated) downstream decision task, while discounting image reconstruction?\nWe propose an ML-based framework that learns an active sampling strategy, via reinforcement learning, at a patient-level to directly infer disease from undersampled k-space. We validate our approach by inferring Meniscus Tear in undersampled knee MRI data, where we achieve diagnostic performance comparable with ML-based diagnosis, using fully sampled k-space data. We analyse task-specific sampling policies, showcasing the adaptability of our active sampling approach. The introduced frugal sampling strategies have the potential to reduce high field strength requirements that in turn strengthen the viability of MRI-based POC disease identification and associated preliminary screening tools.", "title":"The MRI Scanner as a Diagnostic: Image-less Active Sampling", "authors":[ "Du, Yuning", "Dharmakumar, Rohan", "Tsaftaris, Sotirios A." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/vios-s\/MRI_Active_Sampling" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":574 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1861_paper.pdf", "bibtext":"@InProceedings{ Bou_3D_MICCAI2024,\n author = { Bourigault, Emmanuelle and Jamaludin, Amir and Zisserman, Andrew },\n title = { { 3D Spine Shape Estimation from Single 2D DXA } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Scoliosis is currently assessed solely on 2D lateral deviations, but recent studies have also revealed the importance of other imaging planes in understanding the deformation of the spine. Consequently, extracting the spinal geometry in 3D would help quantify these spinal deformations and aid diagnosis. \nIn this study, we propose an automated general framework to estimate the {\\em 3D }spine shape from {\\em 2D} DXA scans. We achieve this by explicitly predicting the sagittal view of the spine from the DXA scan. Using these two orthogonal projections of the spine (coronal in DXA, and sagittal from the prediction), we are able to describe the 3D shape of the spine.\nThe prediction is learnt from over 30k paired images of DXA and MRI scans. We assess the performance of the method on a held out test set, and achieve high accuracy. 
Our code is available at \\href{https:\/\/github.com\/EmmanuelleB985\/DXA-to-3D}{https:\/\/github.com\/EmmanuelleB985\/DXA-to-3D}.", "title":"3D Spine Shape Estimation from Single 2D DXA", "authors":[ "Bourigault, Emmanuelle", "Jamaludin, Amir", "Zisserman, Andrew" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/EmmanuelleB985\/DXA-to-3D" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":575 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0640_paper.pdf", "bibtext":"@InProceedings{ Che_Can_MICCAI2024,\n author = { Chen, Jiawei and Jiang, Yue and Yang, Dingkang and Li, Mingcheng and Wei, Jinjie and Qian, Ziyun and Zhang, Lihua },\n title = { { Can LLMs\u2019 Tuning Methods Work in Medical Multimodal Domain? } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"While Large Language Models (LLMs) excel in world knowledge understanding, adapting them to specific subfields requires precise adjustments. Due to the model\u2019s vast scale, traditional global fine-tuning methods for large models can be computationally expensive and impact generalization. To address this challenge, a range of innovative Parameter-Efficient Fine-Tuning (PEFT) methods have emerged and achieved remarkable success in both LLMs and Large Vision-Language Models (LVLMs). In the medical domain, fine-tuning a medical Vision-Language Pretrained (VLP) model is essential for adapting it to specific tasks. Can the fine-tuning methods for large models be transferred to the medical field to enhance transfer learning efficiency? In this paper, we delve into the fine-tuning methods of LLMs and conduct extensive experiments to investigate the impact of fine-tuning methods for large models on existing multimodal models in the medical domain, at both the training-data level and the model-structure level. We show the different impacts of fine-tuning methods for large models on medical VLMs and develop the most efficient ways to fine-tune medical VLP models. We hope this research can guide medical domain researchers in optimizing VLMs\u2019 training costs, fostering the broader application of VLMs in healthcare fields. 
The code and dataset have been released at https:\/\/github.com\/TIMMY-CHAN\/MILE.", "title":"Can LLMs\u2019 Tuning Methods Work in Medical Multimodal Domain?", "authors":[ "Chen, Jiawei", "Jiang, Yue", "Yang, Dingkang", "Li, Mingcheng", "Wei, Jinjie", "Qian, Ziyun", "Zhang, Lihua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/TIMMY-CHAN\/MILE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":576 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1025_paper.pdf", "bibtext":"@InProceedings{ Yua_HecVL_MICCAI2024,\n author = { Yuan, Kun and Srivastav, Vinkle and Navab, Nassir and Padoy, Nicolas },\n title = { { HecVL: Hierarchical Video-Language Pretraining for Zero-shot Surgical Phase Recognition } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Natural language could play an important role in developing generalist surgical models by providing a broad source of supervision from raw texts. This flexible form of supervision can enable the model\u2019s transferability across datasets and tasks as natural language can be used to reference learned visual concepts or describe new ones. In this work, we present HecVL, a novel hierarchical video-language pretraining approach for building a generalist surgical model. Specifically, we construct a hierarchical video-text paired dataset by pairing the surgical lecture video with three hierarchical levels of texts: at clip-level, atomic actions using transcribed audio texts; at phase-level, conceptual text summaries; and at video-level, overall abstract text of the surgical procedure. Then, we propose a novel fine-to-coarse contrastive learning framework that learns separate embedding spaces for the three video-text hierarchies using a single model. By disentangling embedding spaces of different hierarchical levels, the learned multi-modal representations encode short-term and long-term surgical concepts in the same model. Thanks to the injected textual semantics, we demonstrate that the HecVL approach can enable zero-shot surgical phase recognition without any human annotation. Furthermore, we show that the same HecVL model for surgical phase recognition can be transferred across different surgical procedures and medical centers. 
The source code will be made available at https:\/\/github.com\/CAMMA-public\/HecVL", "title":"HecVL: Hierarchical Video-Language Pretraining for Zero-shot Surgical Phase Recognition", "authors":[ "Yuan, Kun", "Srivastav, Vinkle", "Navab, Nassir", "Padoy, Nicolas" ], "id":"Conference", "arxiv_id":"2405.10075", "GitHub":[ "https:\/\/github.com\/CAMMA-public\/HecVL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":577 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2133_paper.pdf", "bibtext":"@InProceedings{ Kon_Achieving_MICCAI2024,\n author = { Kong, Qingpeng and Chiu, Ching-Hao and Zeng, Dewen and Chen, Yu-Jen and Ho, Tsung-Yi and Hu, Jingtong and Shi, Yiyu },\n title = { { Achieving Fairness Through Channel Pruning for Dermatological Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Numerous studies have revealed that deep learning-based medical image classification models may exhibit bias towards specific demographic attributes, such as race, gender, and age. Existing bias mitigation methods often achieve a high level of fairness at the cost of significant accuracy degradation. In response to this challenge, we propose an innovative and adaptable Soft Nearest Neighbor Loss-based channel pruning framework, which achieves fairness through channel pruning. Traditionally, channel pruning is utilized to accelerate neural network inference. However, our work demonstrates that pruning can also be a potent tool for achieving fairness. Our key insight is that different channels in a layer contribute differently to the accuracy of different groups. By selectively pruning critical channels that lead to the accuracy difference between the privileged and unprivileged groups, we can effectively improve fairness without sacrificing accuracy significantly. Experiments conducted on two skin lesion diagnosis datasets across multiple sensitive attributes validate the effectiveness of our method in achieving a state-of-the-art trade-off between accuracy and fairness. 
Our code is available at https:\/\/github.com\/Kqp1227\/Sensitive-Channel-Pruning.", "title":"Achieving Fairness Through Channel Pruning for Dermatological Disease Diagnosis", "authors":[ "Kong, Qingpeng", "Chiu, Ching-Hao", "Zeng, Dewen", "Chen, Yu-Jen", "Ho, Tsung-Yi", "Hu, Jingtong", "Shi, Yiyu" ], "id":"Conference", "arxiv_id":"2405.08681", "GitHub":[ "https:\/\/github.com\/Kqp1227\/Sensitive-Channel-Pruning" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":578 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0761_paper.pdf", "bibtext":"@InProceedings{ Che_WsiCaption_MICCAI2024,\n author = { Chen, Pingyi and Li, Honglin and Zhu, Chenglu and Zheng, Sunyi and Shui, Zhongyi and Yang, Lin },\n title = { { WsiCaption: Multiple Instance Generation of Pathology Reports for Gigapixel Whole-Slide Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Whole slide images are the foundation of digital pathology for the diagnosis and treatment of carcinomas. Writing pathology reports is laborious and error-prone for inexperienced pathologists. To reduce the workload and improve clinical automation, we investigate how to generate pathology reports given whole slide images. On the data end, we curated the largest WSI-text dataset (PathText). In specific, we collected nearly 10000 high-quality WSI-text pairs for visual-language models by recognizing and cleaning pathology reports which narrate diagnostic slides in TCGA. On the model end, we propose the multiple instance generative model (MI-Gen) which can produce pathology reports for gigapixel WSIs. We benchmark our model on the largest subset of PathText. Experimental results show our model can generate pathology reports which contain multiple clinical clues and achieve competitive performance on certain slide-level tasks. We observe that simple semantic extraction from the pathology reports can achieve the best performance (0.838 of F1 score) on BRCA subtyping surpassing previous state-of-the-art approaches. Our collected dataset and related code are available at\nhttps:\/\/github.com\/cpystan\/Wsi-Caption.", "title":"WsiCaption: Multiple Instance Generation of Pathology Reports for Gigapixel Whole-Slide Images", "authors":[ "Chen, Pingyi", "Li, Honglin", "Zhu, Chenglu", "Zheng, Sunyi", "Shui, Zhongyi", "Yang, Lin" ], "id":"Conference", "arxiv_id":"2311.16480", "GitHub":[ "https:\/\/github.com\/cpystan\/Wsi-Caption\/tree\/master" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":579 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1423_paper.pdf", "bibtext":"@InProceedings{ Ma_Adaptive_MICCAI2024,\n author = { Ma, Siteng and Du, Honghui and Curran, Kathleen M. 
and Lawlor, Aonghus and Dong, Ruihai },\n title = { { Adaptive Curriculum Query Strategy for Active Learning in Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep active learning (AL) is commonly used to reduce labeling costs in medical image analysis. Deep learning (DL) models typically exhibit a preference for learning from easy data and simple patterns before they learn from complex ones. However, existing AL methods often employ a fixed query strategy for sample selection, which may cause the model to focus too closely on challenging-to-classify data. The result is a deceleration of the convergence of DL models and an increase in the amount of labeled data required to train them. To address this issue, we propose a novel Adaptive Curriculum Query Strategy for AL in Medical Image Classification. During the training phase, our strategy leverages Curriculum Learning principles to initially prioritize the selection of a diverse range of samples to cover various difficulty levels, facilitating rapid model convergence. Once the distribution of the selected samples closely matches that of the entire dataset, the query strategy shifts its focus towards difficult-to-classify data based on uncertainty. This novel approach enables the model to achieve superior performance with fewer labeled samples. We perform extensive experiments demonstrating that our model not only requires fewer labeled samples but outperforms state-of-the-art models in terms of efficiency and effectiveness. The code is publicly available at https:\/\/github.com\/HelenMa9998\/Easy_hard_AL.", "title":"Adaptive Curriculum Query Strategy for Active Learning in Medical Image Classification", "authors":[ "Ma, Siteng", "Du, Honghui", "Curran, Kathleen M.", "Lawlor, Aonghus", "Dong, Ruihai" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/HelenMa9998\/Easy_hard_AL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":580 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1733_paper.pdf", "bibtext":"@InProceedings{ Xia_GMoD_MICCAI2024,\n author = { Xiang, ZhiPeng and Cui, ShaoGuo and Shang, CaoZhi and Jiang, Jingfeng and Zhang, Liqiang },\n title = { { GMoD: Graph-driven Momentum Distillation Framework with Active Perception of Disease Severity for Radiology Report Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automatic radiology report generation is a challenging task that seeks to produce comprehensive and semantically consistent detailed descriptions from radiography (e.g., X-ray), alleviating the heavy workload of radiologists. Previous work explored the introduction of diagnostic information through multi-label classification. However, such methods can only provide a binary positive or negative classification result, leading to the omission of critical information regarding disease severity. 
We propose a Graph-driven Momentum Distillation (GMoD) approach to guide the model in actively perceiving the apparent disease severity implicitly conveyed in each radiograph. The proposed GMoD introduces two novel modules: Graph-based Topic Classifier (GTC) and Momentum Topic-Signal Distiller (MTD). Specifically, GTC combines symptoms and lung diseases to build topic maps and focuses on potential connections between them. MTD constrains the GTC to focus on the confidence of each disease being negative or positive by constructing pseudo labels, and then uses the multi-label classification results to assist the model in perceiving joint features to generate a more accurate report. Extensive experiments and analyses on IU-Xray and MIMIC-CXR benchmark datasets demonstrate that our GMoD outperforms state-of-the-art method. Our code is available at https:\/\/github.com\/xzp9999\/GMoD-mian.", "title":"GMoD: Graph-driven Momentum Distillation Framework with Active Perception of Disease Severity for Radiology Report Generation", "authors":[ "Xiang, ZhiPeng", "Cui, ShaoGuo", "Shang, CaoZhi", "Jiang, Jingfeng", "Zhang, Liqiang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xzp9999\/GMoD-mian" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":581 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2165_paper.pdf", "bibtext":"@InProceedings{ Shi_Integrative_MICCAI2024,\n author = { Shi, Zhan and Zhang, Jingwei and Kong, Jun and Wang, Fusheng },\n title = { { Integrative Graph-Transformer Framework for Histopathology Whole Slide Image Representation and Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"In digital pathology, the multiple instance learning (MIL) strategy is widely used in the weakly supervised histopathology whole slide image (WSI) classification task where giga-pixel WSIs are only labeled at the slide level. However, existing attention-based MIL approaches often overlook contextual information and intrinsic spatial relationships between neighboring tissue tiles, while graph-based MIL frameworks have limited power to recognize the long-range dependencies. In this paper, we introduce the integrative graph-transformer framework that simultaneously captures the context-aware relational features and global WSI representations through a novel Graph Transformer Integration (GTI) block. Specifically, each GTI block consists of a Graph Convolutional Network (GCN) layer modeling neighboring relations at the local instance level and an efficient global attention model capturing comprehensive global information from extensive feature embeddings. 
Extensive experiments on three publicly available WSI datasets: TCGA-NSCLC, TCGA-RCC and BRIGHT, demonstrate the superiority of our approach over current state-of-the-art MIL methods, achieving an improvement of 1.0% to 2.6% in accuracy and 0.7%-1.6% in AUROC.", "title":"Integrative Graph-Transformer Framework for Histopathology Whole Slide Image Representation and Classification", "authors":[ "Shi, Zhan", "Zhang, Jingwei", "Kong, Jun", "Wang, Fusheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/StonyBrookDB\/igt-wsi" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":582 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1637_paper.pdf", "bibtext":"@InProceedings{ Ma_PX2Tooth_MICCAI2024,\n author = { Ma, Wen and Wu, Huikai and Xiao, Zikai and Feng, Yang and Wu, Jian and Liu, Zuozhu },\n title = { { PX2Tooth: Reconstructing the 3D Point Cloud Teeth from a Single Panoramic X-ray } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Reconstructing the 3D anatomical structures of the oral cavity, which originally reside in the cone-beam CT (CBCT), from a single 2D Panoramic X-ray(PX) remains a critical yet challenging task, as it can effectively reduce radiation risks and treatment costs during the diagnostic in digital dentistry. However, current methods are either error-prone or only trained\/evaluated on small-scale datasets (less than 50 cases), resulting in compromised trustworthiness. In this paper, we propose PX2Tooth, a novel approach to reconstruct 3D teeth using a single PX image with a two-stage framework. First, we design the PXSegNet to segment the permanent teeth from the PX images, providing clear positional, morphological, and categorical information for each tooth. Subsequently, we design a novel tooth generation network (TGNet) that learns to transform random point clouds into 3D teeth. TGNet integrates the segmented patch information and introduces a Prior Fusion Module (PFM) to enhance the generation quality, especially in the root apex region. Moreover, we construct a dataset comprising 499 pairs of CBCT and Panoramic X-rays. 
Extensive experiments demonstrate that PX2Tooth can achieve an Intersection over Union (IoU) of 0.793, significantly surpassing previous methods, underscoring the great potential of artificial intelligence in digital dentistry.", "title":"PX2Tooth: Reconstructing the 3D Point Cloud Teeth from a Single Panoramic X-ray", "authors":[ "Ma, Wen", "Wu, Huikai", "Xiao, Zikai", "Feng, Yang", "Wu, Jian", "Liu, Zuozhu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":583 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1472_paper.pdf", "bibtext":"@InProceedings{ Jia_M4oE_MICCAI2024,\n author = { Jiang, Yufeng and Shen, Yiqing },\n title = { { M4oE: A Foundation Model for Medical Multimodal Image Segmentation with Mixture of Experts } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical imaging data is inherently heterogeneous across different modalities and clinical centers, posing unique challenges for developing generalizable foundation models. Conventional approaches entail training distinct models per dataset or using a shared encoder with modality-specific decoders. However, these approaches incur heavy computational overheads and suffer from poor scalability. To address these limitations, we propose the Medical Multi-Modal Mixture of Experts (M4oE) framework, leveraging the SwinUNet architecture. Specifically, M4oE comprises modality-specific experts, each separately initialized to learn features encoding domain knowledge. Subsequently, a gating network is integrated during fine-tuning to dynamically modulate each expert\u2019s contribution to the collective predictions. This enhances model interpretability as well as the generalization ability while retaining expertise specialization. Simultaneously, the M4oE architecture amplifies the model\u2019s parallel processing capabilities, and it also ensures the model\u2019s adaptation to new modalities with ease. Experiments across three modalities reveal that M4oE achieves improvements of 3.45% over STU-Net-L, 5.11% over MED3D, and 11.93% over SAM-Med2D across the MICCAI FLARE22, AMOS2022, and ATLAS2023 datasets. Moreover, M4oE showcases a significant reduction in training duration, with 7 hours less, while maintaining a parameter count that is only 30% of the compared methods. 
The code is available at https:\/\/github.com\/JefferyJiang-YF\/M4oE.", "title":"M4oE: A Foundation Model for Medical Multimodal Image Segmentation with Mixture of Experts", "authors":[ "Jiang, Yufeng", "Shen, Yiqing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/JefferyJiang-YF\/M4oE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":584 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1272_paper.pdf", "bibtext":"@InProceedings{ Hou_ConceptAttention_MICCAI2024,\n author = { Hou, Junlin and Xu, Jilan and Chen, Hao },\n title = { { Concept-Attention Whitening for Interpretable Skin Lesion Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"The black-box nature of deep learning models has raised concerns about their interpretability for successful deployment in real-world clinical applications. To address the concerns, eXplainable Artificial Intelligence (XAI) aims to provide clear and understandable explanations of the decision-making process. In the medical domain, concepts such as attributes of lesions or abnormalities serve as key evidence for deriving diagnostic results. Existing concept-based models mainly depend on concepts that appear independently and require fine-grained concept annotations such as bounding boxes. However, a medical image usually contains multiple concepts, and the fine-grained concept annotations are difficult to acquire. In this paper, we aim to interpret representations in deep neural networks by aligning the axes of the latent space with known concepts of interest. We propose a novel Concept-Attention Whitening (CAW) framework for interpretable skin lesion diagnosis. CAW is comprised of a disease diagnosis branch and a concept alignment branch. In the former branch, we train a convolutional neural network (CNN) with an inserted CAW layer to perform skin lesion diagnosis. The CAW layer decorrelates features and aligns image features to conceptual meanings via an orthogonal matrix. In the latter branch, the orthogonal matrix is calculated under the guidance of the concept attention mask. We particularly introduce a weakly-supervised concept mask generator that only leverages coarse concept labels for filtering local regions that are relevant to certain concepts, improving the optimization of the orthogonal matrix. 
Extensive experiments on two public skin lesion diagnosis datasets demonstrated that CAW not only enhanced interpretability but also maintained a state-of-the-art diagnostic performance.", "title":"Concept-Attention Whitening for Interpretable Skin Lesion Diagnosis", "authors":[ "Hou, Junlin", "Xu, Jilan", "Chen, Hao" ], "id":"Conference", "arxiv_id":"2404.05997", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":585 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2014_paper.pdf", "bibtext":"@InProceedings{ Li_Anatomical_MICCAI2024,\n author = { Li, Qingqiu and Yan, Xiaohan and Xu, Jilan and Yuan, Runtian and Zhang, Yuejie and Feng, Rui and Shen, Quanli and Zhang, Xiaobo and Wang, Shujun },\n title = { { Anatomical Structure-Guided Medical Vision-Language Pre-training } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Learning medical visual representations through vision-language pre-training has reached remarkable progress. Despite the promising performance, it still faces challenges, i.e., local alignment lacks interpretability and clinical relevance, and the insufficient internal and external representation learning of image-report pairs. To address these issues, we propose an Anatomical Structure-Guided (ASG) framework. Specifically, we parse raw reports into triplets , and fully utilize each element as supervision to enhance representation learning. For anatomical region, we design an automatic anatomical region-sentence alignment paradigm in collaboration with radiologists, considering them as the minimum semantic units to explore fine-grained local alignment. For finding and existence, we regard them as image tags, applying an image-tag recognition decoder to associate image features with their respective tags within each sample and constructing soft labels for contrastive learning to improve the semantic association of different image-report pairs. We evaluate the proposed ASG framework on two downstream tasks, including five public benchmarks. Experimental results demonstrate that our method outperforms the state-of-the-art methods. 
Our code is available at https:\/\/asgmvlp.github.io.", "title":"Anatomical Structure-Guided Medical Vision-Language Pre-training", "authors":[ "Li, Qingqiu", "Yan, Xiaohan", "Xu, Jilan", "Yuan, Runtian", "Zhang, Yuejie", "Feng, Rui", "Shen, Quanli", "Zhang, Xiaobo", "Wang, Shujun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ASGMVLP\/ASGMVLP_CODE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":586 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1841_paper.pdf", "bibtext":"@InProceedings{ Liu_Featureprompting_MICCAI2024,\n author = { Liu, Xueyu and Shi, Guangze and Wang, Rui and Lai, Yexin and Zhang, Jianan and Sun, Lele and Yang, Quan and Wu, Yongfei and Li, Ming and Han, Weixia and Zheng, Wen },\n title = { { Feature-prompting GBMSeg: One-Shot Reference Guided Training-Free Prompt Engineering for Glomerular Basement Membrane Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Main Paper (Open Access Version): https:\/\/papers.miccai.org\/miccai-2024\/paper\/1841_paper.pdf", "title":"Feature-prompting GBMSeg: One-Shot Reference Guided Training-Free Prompt Engineering for Glomerular Basement Membrane Segmentation", "authors":[ "Liu, Xueyu", "Shi, Guangze", "Wang, Rui", "Lai, Yexin", "Zhang, Jianan", "Sun, Lele", "Yang, Quan", "Wu, Yongfei", "Li, Ming", "Han, Weixia", "Zheng, Wen" ], "id":"Conference", "arxiv_id":"2406.16271", "GitHub":[ "https:\/\/github.com\/SnowRain510\/GBMSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":587 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2585_paper.pdf", "bibtext":"@InProceedings{ Yua_Adapting_MICCAI2024,\n author = { Yuan, Zhouhang and Fang, Zhengqing and Huang, Zhengxing and Wu, Fei and Yao, Yu-Feng and Li, Yingming },\n title = { { Adapting Pre-trained Generative Model to Medical Image for Data Augmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning-based medical image recognition requires a large number of expert-annotated data. As medical image data is often scarce and class imbalanced, many researchers have tried to synthesize medical images as training samples. However, the quality of the generated data determines the effectiveness of the method, which in turn is related to the amount of data available for training. To produce high-quality data augmentation in few-shot settings, we try to adapt large-scale pre-trained generative models to medical images. Specifically, we adapt MAGE (a masked image modeling-based generative model) as the pre-trained generative model, and then an Adapter is implemented within each layer to learn class-wise medical knowledge. 
In addition, to reduce the complexity caused by the high-dimensional latent space, we introduce a vector quantization loss as a constraint during fine-tuning. The experiments are conducted on three different medical image datasets. The results show that our method produces more realistic augmentation samples than existing generative models, improving classification accuracy by 5.16%, 2.74%, and 3.62% on the three datasets, respectively. The results demonstrate that adapting pre-trained generative models for medical image synthesis is a promising approach in limited-data situations.", "title":"Adapting Pre-trained Generative Model to Medical Image for Data Augmentation", "authors":[ "Yuan, Zhouhang", "Fang, Zhengqing", "Huang, Zhengxing", "Wu, Fei", "Yao, Yu-Feng", "Li, Yingming" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/YuanZhouhang\/VQ-MAGE-Med" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":588 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1123_paper.pdf", "bibtext":"@InProceedings{ She_DataAlgorithmArchitecture_MICCAI2024,\n author = { Sheng, Yi and Yang, Junhuan and Li, Jinyang and Alaina, James and Xu, Xiaowei and Shi, Yiyu and Hu, Jingtong and Jiang, Weiwen and Yang, Lei },\n title = { { Data-Algorithm-Architecture Co-Optimization for Fair Neural Networks on Skin Lesion Dataset } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"As Artificial Intelligence (AI) increasingly integrates into our daily lives, fairness has emerged as a critical concern, particularly in medical AI, where datasets often reflect inherent biases due to social factors like the underrepresentation of marginalized communities and socioeconomic barriers to data collection. Traditional approaches to mitigating these biases have focused on data augmentation and the development of fairness-aware training algorithms. However, this paper argues that the architecture of neural networks, a core component of Machine Learning (ML), plays a crucial role in ensuring fairness. We demonstrate that addressing fairness effectively requires a holistic approach that simultaneously considers data, algorithms, and architecture. Utilizing Automated ML (AutoML) technology, specifically Neural Architecture Search (NAS), we introduce a novel framework, BiaslessNAS, designed to achieve fair outcomes in analyzing skin lesion datasets. BiaslessNAS incorporates fairness considerations at every stage of the NAS process, leading to the identification of neural networks that are not only more accurate but also significantly fairer. 
Our experiments show that BiaslessNAS achieves a 2.55% increase in accuracy and a 65.50% improvement in fairness compared to traditional NAS methods, underscoring the importance of integrating fairness into neural network architecture for better outcomes in medical AI applications.", "title":"Data-Algorithm-Architecture Co-Optimization for Fair Neural Networks on Skin Lesion Dataset", "authors":[ "Sheng, Yi", "Yang, Junhuan", "Li, Jinyang", "Alaina, James", "Xu, Xiaowei", "Shi, Yiyu", "Hu, Jingtong", "Jiang, Weiwen", "Yang, Lei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":589 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2390_paper.pdf", "bibtext":"@InProceedings{ Che_AUnified_MICCAI2024,\n author = { Chen, Boqi and Oliva, Junier and Niethammer, Marc },\n title = { { A Unified Model for Longitudinal Multi-Modal Multi-View Prediction with Missingness } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical records often consist of different modalities, such as images, text, and tabular information. Integrating all modalities offers a holistic view of a patient\u2019s condition, while analyzing them longitudinally provides a better understanding of disease progression. However, real-world longitudinal medical records present challenges: 1) patients may lack some or all of the data for a specific timepoint, and 2) certain modalities or views might be absent for all patients during a particular period. In this work, we introduce a unified model for longitudinal multi-modal multi-view prediction with missingness. Our method allows as many timepoints as desired for input, and aims to leverage all available data, regardless of their availability. We conduct extensive experiments on the knee osteoarthritis dataset from the Osteoarthritis Initiative (OAI) for pain and Kellgren-Lawrence grade (KLG) prediction at a future timepoint. We demonstrate the effectiveness of our method by comparing results from our unified model to specific models that use the same modality and view combinations during training and evaluation. 
We also show the benefit of having extended temporal data and provide post-hoc analysis for a deeper understanding of each modality\/view\u2019s importance for different tasks.", "title":"A Unified Model for Longitudinal Multi-Modal Multi-View Prediction with Missingness", "authors":[ "Chen, Boqi", "Oliva, Junier", "Niethammer, Marc" ], "id":"Conference", "arxiv_id":"2403.12211", "GitHub":[ "https:\/\/github.com\/uncbiag\/UniLMMV" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":590 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2247_paper.pdf", "bibtext":"@InProceedings{ Das_Decoupled_MICCAI2024,\n author = { Das, Ankit and Gautam, Chandan and Cholakkal, Hisham and Agrawal, Pritee and Yang, Feng and Savitha, Ramasamy and Liu, Yong },\n title = { { Decoupled Training for Semi-supervised Medical Image Segmentation with Worst-Case-Aware Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"While semi-supervised learning (SSL) has demonstrated remarkable success in natural image segmentation, tackling medical image segmentation with limited annotated data remains a highly relevant and challenging research problem. Many existing approaches rely on a shared network for learning from both labeled and unlabeled data, facing difficulties in fully exploiting labeled data due to interference from unreliable pseudo-labels. Additionally, they suffer from degradation in model quality resulting from training with unreliable pseudo-labels. To address these challenges, we propose a novel training strategy that uses two distinct decoders\u2014one for labeled data and another for unlabeled data. This decoupling enhances the model\u2019s ability to fully leverage the knowledge embedded within the labeled data. Moreover, we introduce an additional decoder, referred to as the ``worst-case-aware decoder,\u201d which indirectly assesses potential worst case scenario that might emerge from pseudo-label training. We employ adversarial training of the encoder to learn features aimed at avoiding this worst case scenario. Our experimental results on three medical image segmentation datasets demonstrate that our method shows improvements in range of 5.6% - 28.10% (in terms of dice score) compared to the state-of-the-art techniques. 
The source code is available at \\url{https:\/\/github.com\/thesupermanreturns\/decoupled}.", "title":"Decoupled Training for Semi-supervised Medical Image Segmentation with Worst-Case-Aware Learning", "authors":[ "Das, Ankit", "Gautam, Chandan", "Cholakkal, Hisham", "Agrawal, Pritee", "Yang, Feng", "Savitha, Ramasamy", "Liu, Yong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/thesupermanreturns\/decoupled" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":591 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3097_paper.pdf", "bibtext":"@InProceedings{ Par_CAPTUREGAN_MICCAI2024,\n author = { Park, Chunsu and Kim, Seonho and Lee, DongEon and Lee, SiYeoul and Kambaluru, Ashok and Park, Chankue and Kim, MinWoo },\n title = { { CAPTURE-GAN: Conditional Attribute Preservation through Unveiling Realistic GAN for artifact removal in dual-energy CT imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"This study addresses the challenge of detecting bone marrow edema (BME) using dual-energy CT (DECT), a task complicated by the lower contrast DECT offers compared to MRI and the presence of artifacts inherent in the image formation process. Despite the advancements in AI-based solutions for image enhancement, achieving an artifact-free outcome in DECT remains difficult due to the impracticality of obtaining paired ground-truth and artifact-containing images for supervised learning. To overcome this, we explore unsupervised techniques such as CycleGAN and AttGAN for artifact removal, which, while effective in other domains, face challenges in DECT due to the similarity between artifact and pathological patterns. Our contribution, the Conditional Attribute Preservation through Unveiling Realistic GAN (CAPTURE-GAN), innovatively combines a generative model with conditional constraints through masking and classification models to not only minimize artifacts but also preserve the pathology of BME and the anatomical integrity of bone. By incorporating bone priors into CycleGAN and adding a disease classification network, CAPTURE-GAN significantly improves the specificity and sensitivity of BME detection in DECT imaging. 
Our approach demonstrates a substantial enhancement in generating artifact-free images, ensuring that critical diagnostic patterns are not obscured, thereby advancing the potential for DECT in diagnosing and localizing lesions accurately.", "title":"CAPTURE-GAN: Conditional Attribute Preservation through Unveiling Realistic GAN for artifact removal in dual-energy CT imaging", "authors":[ "Park, Chunsu", "Kim, Seonho", "Lee, DongEon", "Lee, SiYeoul", "Kambaluru, Ashok", "Park, Chankue", "Kim, MinWoo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/pnu-amilab\/CAPTURE-GAN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":592 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2749_paper.pdf", "bibtext":"@InProceedings{ Lyu_MetaUNETR_MICCAI2024,\n author = { Lyu, Pengju and Zhang, Jie and Zhang, Lei and Liu, Wenjian and Wang, Cheng and Zhu, Jianjun },\n title = { { MetaUNETR: Rethinking Token Mixer Encoding for Efficient Multi-Organ Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"The Transformer architecture and versatile CNN backbones have led to advanced progress in sequence modeling and dense prediction tasks. A critical development is the incorporation of different token mixing modules such as ConvNeXt, Swin Transformer. However, findings within the MetaFormer framework suggest these token mixers have a lesser influence on representation learning than the architecture itself. Yet, their impact on 3D medical images remains unclear, motivating our investigation into different token mixers (self-attention, convolution, MLP, recurrence, global filter, and Mamba) in 3D medical image segmentation architectures, and further prompting a reevaluation of the backbone architecture\u2019s role to achieve the trade off in accuracy and efficiency. In the paper, we propose a unified segmentation architecture\u2014MetaUNETR featuring a novel TriCruci layer that decomposes the token mixing processes along each spatial direction while simultaneously preserving precise positional information on its orthogonal plane. By employing the Centered Kernel Alignment (CKA) analysis on feature learning capabilities among these token mixers, we find that the overall architecture of the model, rather than any specific token mixers, plays a more crucial role in determining the model\u2019s performance. Our method is validated across multiple benchmarks varying in size and scale, including the BTCV, AMOS, and AbdomenCT-1K datasets, achieving the top segmentation performance while reducing the model\u2019s parameters by about 80% compared to the state-of-the-art method. This study provides insights for future research on the design and optimization of backbone architecture, steering towards more efficient foundational segmentation models. 
The source code is available at https:\/\/github.com\/lyupengju\/MetaUNETR.", "title":"MetaUNETR: Rethinking Token Mixer Encoding for Efficient Multi-Organ Segmentation", "authors":[ "Lyu, Pengju", "Zhang, Jie", "Zhang, Lei", "Liu, Wenjian", "Wang, Cheng", "Zhu, Jianjun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/lyupengju\/MetaUNETR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":593 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3325_paper.pdf", "bibtext":"@InProceedings{ Hua_Robustly_MICCAI2024,\n author = { Huang, Peng and Hu, Shu and Peng, Bo and Zhang, Jiashu and Wu, Xi and Wang, Xin },\n title = { { Robustly Optimized Deep Feature Decoupling Network for Fatty Liver Diseases Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Current medical image classification efforts mainly aim for higher average performance, often neglecting the balance between different classes. This can lead to significant differences in recognition accuracy between classes and obvious recognition weaknesses. Without the support of massive data, deep learning faces challenges in fine-grained classification of fatty liver. In this paper, we propose an innovative deep learning framework that combines feature decoupling and adaptive adversarial training. Firstly, we employ two iteratively compressed decouplers to decouple, in a supervised manner, common features and specific features related to fatty liver in abdominal ultrasound images. Subsequently, the decoupled features are concatenated with the original image after transforming the color space and are fed into the classifier. During adversarial training, we adaptively adjust the perturbation and balance the adversarial strength by the accuracy of each class. The model will eliminate recognition weaknesses by correctly classifying adversarial samples, thus improving recognition robustness. Finally, the accuracy of our method improved by 4.16%, achieving 82.95%. As demonstrated by extensive experiments, our method is a generalized learning framework that can be directly used to eliminate the recognition weaknesses of any classifier while improving its average performance. 
Code is available at https:\/\/github.com\/HP-ML\/MICCAI2024.", "title":"Robustly Optimized Deep Feature Decoupling Network for Fatty Liver Diseases Detection", "authors":[ "Huang, Peng", "Hu, Shu", "Peng, Bo", "Zhang, Jiashu", "Wu, Xi", "Wang, Xin" ], "id":"Conference", "arxiv_id":"2406.17338", "GitHub":[ "https:\/\/github.com\/HP-ML\/MICCAI2024" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":594 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2276_paper.pdf", "bibtext":"@InProceedings{ Hua_Uncovering_MICCAI2024,\n author = { Huang, Yanquan and Dan, Tingting and Kim, Won Hwa and Wu, Guorong },\n title = { { Uncovering Cortical Pathways of Prion-like Pathology Spreading in Alzheimer\u2019s Disease by Neural Optimal Mass Transport } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"While tremendous efforts have been made to investigate stereotypical patterns of tau aggregates in Alzheimer\u2019s disease (AD), current positron emission tomography (PET) technology lacks the capability to quantify the dynamic spreading flows of tau propagation in disease progression, despite the fact that AD is characterized by the propagation of tau aggregates throughout the brain in a prion-like manner. We address this challenge by formulating the search for latent cortical tau propagation pathways as a well-studied physics model, the optimal mass transport (OMT) problem, where the dynamic behavior of tau spreading across longitudinal tau-PET scans is constrained by the geometry of the brain cortex. In this context, we present a variational framework for the dynamical system of tau propagation in the brain, where the spreading flow field is essentially a Wasserstein geodesic between two density distributions of spatial tau accumulation. Meanwhile, our variational framework provides a flexible approach to model the possible increase of tau aggregates and alleviate the issue of vanishing flows by introducing a total variation (TV) regularization on the flow field. Following the spirit of physics-informed deep models, we derive the governing equation of the new TV-based unbalanced OMT model and customize an explainable generative adversarial network to (1) parameterize the population-level OMT using the generator and (2) predict the tau spreading flow for an unseen subject by the trained discriminator. We have evaluated the accuracy of our proposed model using the ADNI and OASIS datasets, focusing on its ability to herald future tau accumulation. Since our deep model follows the second law of thermodynamics, we further investigate the propagation mechanism of tau aggregates as AD advances. 
Compared to existing methodologies, our physics-informed approach delivers superior accuracy and interpretability, showcasing promising potential for uncovering novel neurobiological mechanisms.", "title":"Uncovering Cortical Pathways of Prion-like Pathology Spreading in Alzheimer\u2019s Disease by Neural Optimal Mass Transport", "authors":[ "Huang, Yanquan", "Dan, Tingting", "Kim, Won Hwa", "Wu, Guorong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":595 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0703_paper.pdf", "bibtext":"@InProceedings{ Fen_Enhancing_MICCAI2024,\n author = { Feng, Chun-Mei },\n title = { { Enhancing Label-efficient Medical Image Segmentation with Text-guided Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Aside from offering state-of-the-art performance in medical image generation, denoising diffusion probabilistic models (DPM) can also serve as a representation learner to capture semantic information and potentially be used as an image representation for downstream tasks, e.g., segmentation. However, these latent semantic representations rely heavily on labor-intensive pixel-level annotations as supervision, limiting the usability of DPM in medical image segmentation. To address this limitation, we propose an enhanced diffusion segmentation model, called TextDiff, that improves semantic representation through inexpensive medical text annotations, thereby explicitly establishing semantic representation and language correspondence for diffusion models. Concretely, TextDiff extracts intermediate activations of the Markov step of the reverse diffusion process in a pretrained diffusion model on large-scale natural images and learns additional expert knowledge by combining them with complementary and readily available diagnostic text information. TextDiff freezes the dual-branch multi-modal structure and mines the latent alignment of semantic features in diffusion models with diagnostic descriptions by only training the cross-attention mechanism and pixel classifier, making it possible to enhance semantic representation with inexpensive text. Extensive experiments on public QaTa-COVID19 and MoNuSeg datasets show that our TextDiff is significantly superior to the state-of-the-art multi-modal segmentation methods with only a few training samples. 
Our code and models will be publicly available.", "title":"Enhancing Label-efficient Medical Image Segmentation with Text-guided Diffusion Models", "authors":[ "Feng, Chun-Mei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/chunmeifeng\/TextDiff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":596 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3664_paper.pdf", "bibtext":"@InProceedings{ Yu_PET_MICCAI2024,\n author = { Yu, Boxiao and Ozdemir, Savas and Dong, Yafei and Shao, Wei and Shi, Kuangyu and Gong, Kuang },\n title = { { PET Image Denoising Based on 3D Denoising Diffusion Probabilistic Model: Evaluations on Total-Body Datasets } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Due to various physical degradation factors and limited photon counts detected, obtaining high-quality images from low-dose Positron emission tomography (PET) scans is challenging. The Denoising Diffusion Probabilistic Model (DDPM), an advanced distribution learning-based generative model, has shown promising performance across various computer-vision tasks. However, currently DDPM is mainly investigated in 2D mode, which has limitations for PET image denoising, as PET is usually acquired, reconstructed, and analyzed in 3D mode. In this work, we proposed a 3D DDPM method for PET image denoising, which employed a 3D convolutional network to train the score function, enabling the network to learn 3D distribution. The total-body 18F-FDG PET datasets acquired from the Siemens Biograph Vision Quadra scanner (axial field of view > 1m) were employed to evaluate the 3D DDPM method, as these total-body datasets needed 3D operations the most to leverage the rich information from different axial slices. All models were trained on 1\/20 low-dose images and then evaluated on 1\/4, 1\/20, and 1\/50 low-dose images, respectively. Experimental results indicated that 3D DDPM significantly outperformed 2D DDPM and 3D UNet in qualitative and quantitative assessments, capable of recovering finer structures and more accurate edge contours from low-quality PET images. Moreover, 3D DDPM revealed greater robustness when there were noise level mismatches between training and testing data. 
Finally, comparing 3D DDPM with 2D DDPM in terms of uncertainty revealed 3D DDPM\u2019s higher confidence in reproducibility.", "title":"PET Image Denoising Based on 3D Denoising Diffusion Probabilistic Model: Evaluations on Total-Body Datasets", "authors":[ "Yu, Boxiao", "Ozdemir, Savas", "Dong, Yafei", "Shao, Wei", "Shi, Kuangyu", "Gong, Kuang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":597 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0117_paper.pdf", "bibtext":"@InProceedings{ Gao_Aligning_MICCAI2024,\n author = { Gao, Yunhe and Gu, Difei and Zhou, Mu and Metaxas, Dimitris },\n title = { { Aligning Human Knowledge with Visual Concepts Towards Explainable Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Although explainability is essential in the clinical diagnosis, most deep learning models still function as black boxes without elucidating their decision-making process. In this study, we investigate the explainable model development that can mimic the decision-making process of human experts by fusing the domain knowledge of explicit diagnostic criteria. We introduce a simple yet effective framework, Explicd, towards Explainable language-informed criteria-based diagnosis. Explicd initiates its process by querying domain knowledge from either large language models (LLMs) or human experts to establish diagnostic criteria across various concept axes (e.g., color, shape, texture, or specific patterns of diseases). By leveraging a pretrained vision-language model, Explicd injects these criteria into the embedding space as knowledge anchors, thereby facilitating the learning of corresponding visual concepts within medical images. The final diagnostic outcome is determined based on the similarity scores between the encoded visual concepts and the textual criteria embeddings. 
Through extensive evaluation on five medical image classification benchmarks, Explicd demonstrates its inherent explainability and improves classification performance compared to traditional black-box models.", "title":"Aligning Human Knowledge with Visual Concepts Towards Explainable Medical Image Classification", "authors":[ "Gao, Yunhe", "Gu, Difei", "Zhou, Mu", "Metaxas, Dimitris" ], "id":"Conference", "arxiv_id":"2406.05596", "GitHub":[ "https:\/\/github.com\/yhygao\/Explicd" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":598 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1027_paper.pdf", "bibtext":"@InProceedings{ Tho_EchoNarrator_MICCAI2024,\n author = { Thomas, Sarina and Cao, Qing and Novikova, Anna and Kulikova, Daria and Ben-Yosef, Guy },\n title = { { EchoNarrator: Generating natural text explanations for ejection fraction predictions } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Ejection fraction (EF) of the left ventricle (LV) is considered one of the most important measurements for diagnosing acute heart failure and can be estimated during cardiac ultrasound acquisition. While recent deep learning methods successfully estimate EF values, the proposed models often lack an explanation for the prediction. However, providing clear and intuitive explanations for clinical measurement predictions would increase the trust of cardiologists in these models.\nIn this paper, we explore predicting EF measurements with Natural Language Explanation (NLE). We propose a model that in a single forward pass combines estimation of the LV contour over multiple frames, together with a set of modules and routines for computing various motion and shape attributes that are associated with ejection fraction. It then feeds the attributes into a large language model to generate text that helps to explain the network\u2019s outcome in a human-like manner. We provide experimental evaluation of our explanatory output, as well as EF prediction, and show that our model can provide EF comparable to state-of-the-art together with meaningful and accurate natural language explanations for the prediction. The project page can be found at https:\/\/github.com\/guybenyosef\/EchoNarrator .", "title":"EchoNarrator: Generating natural text explanations for ejection fraction predictions", "authors":[ "Thomas, Sarina", "Cao, Qing", "Novikova, Anna", "Kulikova, Daria", "Ben-Yosef, Guy" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/guybenyosef\/EchoNarrator" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":599 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0704_paper.pdf", "bibtext":"@InProceedings{ Du_CLEFT_MICCAI2024,\n author = { Du, Yuexi and Chang, Brian and Dvornek, Nicha C. 
},\n title = { { CLEFT: Language-Image Contrastive Learning with Efficient Large Language Model and Prompt Fine-Tuning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advancements in Contrastive Language-Image Pre-training (CLIP) have demonstrated notable success in self-supervised representation learning across various tasks. However, the existing CLIP-like approaches often demand extensive GPU resources and prolonged training times due to the considerable size of the model and dataset, making them poor for medical applications, in which large datasets are not always common. Meanwhile, the language model prompts are mainly manually derived from labels tied to images, potentially overlooking the richness of information within training samples. We introduce a novel language-image Contrastive Learning method with an Efficient large language model and prompt Fine-Tuning (CLEFT) that harnesses the strengths of the extensive pre-trained language and visual models. Furthermore, we present an efficient strategy for learning context-based prompts that mitigates the gap between informative clinical diagnostic data and simple class labels. Our method demonstrates state-of-the-art performance on multiple chest X-ray and mammography datasets compared with various baselines. The proposed parameter efficient framework can reduce the total trainable model size by 39% and reduce the trainable language model to only 4% compared with the current BERT encoder.", "title":"CLEFT: Language-Image Contrastive Learning with Efficient Large Language Model and Prompt Fine-Tuning", "authors":[ "Du, Yuexi", "Chang, Brian", "Dvornek, Nicha C." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/XYPB\/CLEFT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":600 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0669_paper.pdf", "bibtext":"@InProceedings{ Par_SSAM_MICCAI2024,\n author = { Paranjape, Jay N. and Sikder, Shameema and Vedula, S. Swaroop and Patel, Vishal M. },\n title = { { S-SAM: SVD-based Fine-Tuning of Segment Anything Model for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image segmentation has been traditionally approached by training or fine-tuning the entire model to cater to any new modality or dataset. However, this approach often requires tuning a large number of parameters during training. With the introduction of the Segment Anything Model (SAM) for prompted segmentation of natural images, many efforts have been made towards adapting it efficiently for medical imaging, thus reducing the training time and resources. However, these methods still require expert annotations for every image in the form of point prompts or bounding box prompts during training and inference, making it tedious to employ them in practice. 
In this paper, we propose an adaptation technique, called S-SAM, that trains only 0.4% of SAM\u2019s parameters and at the same time uses only the label names as prompts for producing precise masks. This not only makes tuning SAM more efficient than the existing adaptation methods but also removes the burden of providing expert prompts. We evaluate S-SAM on five different modalities including endoscopic images, x-ray, ultrasound, CT, and histology images. Our experiments show that S-SAM outperforms state-of-the-art methods as well as existing SAM adaptation methods while tuning significantly fewer parameters. We release the code for S-SAM at https:\/\/github.com\/JayParanjape\/SVDSAM.", "title":"S-SAM: SVD-based Fine-Tuning of Segment Anything Model for Medical Image Segmentation", "authors":[ "Paranjape, Jay N.", "Sikder, Shameema", "Vedula, S. Swaroop", "Patel, Vishal M." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/JayParanjape\/SVDSAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":601 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1009_paper.pdf", "bibtext":"@InProceedings{ Xia_Customized_MICCAI2024,\n author = { Xia, Zhengwang and Wang, Huan and Zhou, Tao and Jiao, Zhuqing and Lu, Jianfeng },\n title = { { Customized Relationship Graph Neural Network for Brain Disorder Identification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The connectivity structure of brain networks\/graphs provides insights into the segregation and integration patterns among diverse brain regions. Numerous studies have demonstrated that specific brain disorders are associated with abnormal connectivity patterns within distinct regions. Consequently, several Graph Neural Network (GNN) models have been developed to automatically identify irregular integration patterns in brain graphs. However, the inputs for these GNN-based models, namely brain networks\/graphs, are typically constructed using statistical-specific metrics and cannot be trained. This limitation might render them ineffective for downstream tasks, potentially leading to suboptimal outcomes. To address this issue, we propose a Customized Relationship Graph Neural Network (CRGNN) that can bridge the gap between the graph structure and the downstream task. The proposed method can dynamically learn the optimal brain networks\/graphs for each task. Specifically, we design a block that contains multiple parameterized gates to preserve causal relationships among different brain regions. In addition, we devise a novel node aggregation rule and an appropriate constraint to improve the robustness of the model. The proposed method is evaluated on two publicly available datasets, demonstrating superior performance compared to existing methods. 
The implementation code is available at https:\/\/github.com\/NJUSTxiazw\/CRGNN.", "title":"Customized Relationship Graph Neural Network for Brain Disorder Identification", "authors":[ "Xia, Zhengwang", "Wang, Huan", "Zhou, Tao", "Jiao, Zhuqing", "Lu, Jianfeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/NJUSTxiazw\/CRGNN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":602 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2477_paper.pdf", "bibtext":"@InProceedings{ Sin_TrIND_MICCAI2024,\n author = { Sinha, Ashish and Hamarneh, Ghassan },\n title = { { TrIND: Representing Anatomical Trees by Denoising Diffusion of Implicit Neural Fields } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Anatomical trees play a central role in clinical diagnosis and\ntreatment planning. However, accurately representing anatomical trees is challenging due to their varying and complex topology and geometry. Traditional methods for representing tree structures, captured using medical imaging, while invaluable for visualizing vascular and bronchial networks, exhibit drawbacks in terms of limited resolution, flexibility, and efficiency. Recently, implicit neural representations (INRs) have emerged as a powerful tool for representing shapes accurately and efficiently. We propose a novel approach, TrIND, for representing anatomical trees using INR, while also capturing the distribution of a set of trees via denoising diffusion in the space of INRs. We accurately capture the intricate geometries and topologies of anatomical trees at any desired resolution. Through extensive qualitative and quantitative evaluation, we demonstrate high-fidelity tree reconstruction with arbitrary resolution yet compact storage, and versatility across anatomical sites and tree complexities. Our code is available \\href{https:\/\/github.com\/sfu-mial\/TreeDiffusion}{here}.", "title":"TrIND: Representing Anatomical Trees by Denoising Diffusion of Implicit Neural Fields", "authors":[ "Sinha, Ashish", "Hamarneh, Ghassan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/sfu-mial\/TreeDiffusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":603 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0498_paper.pdf", "bibtext":"@InProceedings{ Zha_CoarsetoFine_MICCAI2024,\n author = { Zhang, Yuhan and Huang, Kun and Yang, Xikai and Ma, Xiao and Wu, Jian and Wang, Ningli and Wang, Xi and Heng, Pheng-Ann },\n title = { { Coarse-to-Fine Latent Diffusion Model for Glaucoma Forecast on Sequential Fundus Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Glaucoma is one of the leading causes of irreversible blindness worldwide. 
Predicting the future status of glaucoma is essential for early detection and timely intervention of potential patients and avoiding the outcome of blindness. Based on historical fundus images from patients, existing glaucoma forecast methods directly predict the probability of developing glaucoma in the future. In this paper, we propose a novel glaucoma forecast method called Coarse-to-Fine Latent Diffusion Model (C2F-LDM) to generatively predict the possible features at any future time point in the latent space based on sequential fundus images. After obtaining the predicted features, we can detect the probability of developing glaucoma and reconstruct future fundus images for visualization. Since all fundus images in the sequence are sampled at irregular time points, we propose a time-adaptive sequence encoder that encodes the sequential fundus images with their irregular time intervals as the historical condition to guide the latent diffusion model, making the model capable of capturing the status changes of glaucoma over time. Furthermore, a coarse-to-fine diffusion strategy improves the quality of the predicted features. We verify C2F-LDM on the public glaucoma forecast dataset SIGF. C2F-LDM presents better quantitative results than other state-of-the-art forecast methods and provides visual results for qualitative evaluations.", "title":"Coarse-to-Fine Latent Diffusion Model for Glaucoma Forecast on Sequential Fundus Images", "authors":[ "Zhang, Yuhan", "Huang, Kun", "Yang, Xikai", "Ma, Xiao", "Wu, Jian", "Wang, Ningli", "Wang, Xi", "Heng, Pheng-Ann" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ZhangYH0502\/C2F-LDM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":604 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1339_paper.pdf", "bibtext":"@InProceedings{ Zep_Laplacian_MICCAI2024,\n author = { Zepf, Kilian and Wanna, Selma and Miani, Marco and Moore, Juston and Frellsen, Jes and Hauberg, S\u00f8ren and Warburg, Frederik and Feragen, Aasa },\n title = { { Laplacian Segmentation Networks Improve Epistemic Uncertainty Quantification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Image segmentation relies heavily on neural networks which are known to be overconfident, especially when making predictions on out-of-distribution (OOD) images. This is a common scenario in the medical domain due to variations in equipment, acquisition sites, or image corruptions. This work addresses the challenge of OOD detection by proposing Laplacian Segmentation Networks (LSN): methods which jointly model epistemic (model) and aleatoric (data) uncertainty for OOD detection. In doing so, we propose the first Laplace approximation of the weight posterior that scales to large neural networks with skip connections that have high-dimensional outputs. 
We demonstrate on three datasets that the LSN-modeled parameter distributions, in combination with suitable uncertainty measures, give superior OOD detection.", "title":"Laplacian Segmentation Networks Improve Epistemic Uncertainty Quantification", "authors":[ "Zepf, Kilian", "Wanna, Selma", "Miani, Marco", "Moore, Juston", "Frellsen, Jes", "Hauberg, S\u00f8ren", "Warburg, Frederik", "Feragen, Aasa" ], "id":"Conference", "arxiv_id":"2303.13123", "GitHub":[ "https:\/\/github.com\/kilianzepf\/laplacian_segmentation" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":605 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1818_paper.pdf", "bibtext":"@InProceedings{ Guo_FreeSurGS_MICCAI2024,\n author = { Guo, Jiaxin and Wang, Jiangliu and Kang, Di and Dong, Wenzhen and Wang, Wenting and Liu, Yun-hui },\n title = { { Free-SurGS: SfM-Free 3D Gaussian Splatting for Surgical Scene Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Real-time 3D reconstruction of surgical scenes plays a vital role in computer-assisted surgery, holding promise to enhance surgeons\u2019 visibility. Recent advancements in 3D Gaussian Splatting (3DGS) have shown great potential for real-time novel view synthesis of general scenes, which relies on accurate poses and point clouds generated by Structure-from-Motion (SfM) for initialization. However, 3DGS with SfM fails to recover accurate camera poses and geometry in surgical scenes due to the challenges of minimal textures and photometric inconsistencies. To tackle this problem, in this paper, we propose the first SfM-free 3DGS-based method for surgical scene reconstruction by jointly optimizing the camera poses and scene representation. Based on the video continuity, the key of our method is to exploit the immediate optical flow priors to guide the projection flow derived from 3D Gaussians. Unlike most previous methods relying on photometric loss only, we formulate the pose estimation problem as minimizing the flow loss between the projection flow and optical flow. A consistency check is further introduced to filter the flow outliers by detecting the rigid and reliable points that satisfy the epipolar geometry. During 3D Gaussian optimization, we randomly sample frames to optimize the scene representations to grow the 3D Gaussian progressively. 
Experiments on the SCARED dataset demonstrate the superior performance of our method over existing methods in novel view synthesis and pose estimation, with high efficiency.", "title":"Free-SurGS: SfM-Free 3D Gaussian Splatting for Surgical Scene Reconstruction", "authors":[ "Guo, Jiaxin", "Wang, Jiangliu", "Kang, Di", "Dong, Wenzhen", "Wang, Wenting", "Liu, Yun-hui" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/wrld\/Free-SurGS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":606 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1348_paper.pdf", "bibtext":"@InProceedings{ Zen_Tackling_MICCAI2024,\n author = { Zeng, Shuang and Guo, Pengxin and Wang, Shuai and Wang, Jianbo and Zhou, Yuyin and Qu, Liangqiong },\n title = { { Tackling Data Heterogeneity in Federated Learning via Loss Decomposition } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Federated Learning (FL) is a rising approach towards collaborative and privacy-preserving machine learning where large-scale medical datasets remain localized to each client. However, the issue of data heterogeneity among clients often compels local models to diverge, leading to suboptimal global models. To mitigate the impact of data heterogeneity on FL performance, we start by analyzing how FL training influences FL performance by decomposing the global loss into three terms: local loss, distribution shift loss and aggregation loss. \nRemarkably, our loss decomposition reveals that existing local training-based FL methods attempt to further reduce the distribution shift loss, while the global aggregation-based FL methods propose better aggregation strategies to reduce the aggregation loss. Nevertheless, a comprehensive joint effort to minimize all three terms is currently limited in the literature, leading to subpar performance when dealing with data heterogeneity challenges. To fill this gap, we propose a novel FL method based on global loss decomposition, called FedLD, to jointly reduce these three loss terms. Our FedLD involves a margin control regularization in local training to reduce the distribution shift loss, and a principal gradient-based server aggregation strategy to reduce the aggregation loss. Notably, under different levels of data heterogeneity, our strategies achieve better and more robust performance on retinal and chest X-ray classification compared to other FL algorithms. 
Our code is available at https:\/\/github.com\/Zeng-Shuang\/FedLD.", "title":"Tackling Data Heterogeneity in Federated Learning via Loss Decomposition", "authors":[ "Zeng, Shuang", "Guo, Pengxin", "Wang, Shuai", "Wang, Jianbo", "Zhou, Yuyin", "Qu, Liangqiong" ], "id":"Conference", "arxiv_id":"2408.12300", "GitHub":[ "https:\/\/github.com\/Zeng-Shuang\/FedLD" ], "paper_page":"https:\/\/huggingface.co\/papers\/2408.12300", "n_linked_authors":1, "upvotes":0, "num_comments":0, "n_authors":6, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":607 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2798_paper.pdf", "bibtext":"@InProceedings{ Jia_Intrapartum_MICCAI2024,\n author = { Jiang, Jianmei and Wang, Huijin and Bai, Jieyun and Long, Shun and Chen, Shuangping and Campello, Victor M. and Lekadir, Karim },\n title = { { Intrapartum Ultrasound Image Segmentation of Pubic Symphysis and Fetal Head Using Dual Student-Teacher Framework with CNN-ViT Collaborative Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"The segmentation of the pubic symphysis and fetal head (PSFH) constitutes a pivotal step in monitoring labor progression and identifying potential delivery complications. Despite the advances in deep learning, the lack of annotated medical images hinders the training of segmentation. Traditional semi-supervised learning approaches primarily utilize a unified network model based on Convolutional Neural Networks (CNNs) and apply consistency regularization to mitigate the reliance on extensive annotated data. However, these methods often fall short in capturing the discriminative features of unlabeled data and in delineating the long-range dependencies inherent in the ambiguous boundaries of PSFH within ultrasound images. To address these limitations, we introduce a novel framework, the Dual-Student and Teacher Combining CNN and Transformer (DSTCT), which synergistically integrates the capabilities of CNNs and Transformers. Our framework comprises a tripartite architecture featuring a Vision Transformer (ViT) as the \u2018teacher\u2019 and two \u2018student\u2019 models \u2014 one ViT and one CNN. This dual-student setup enables mutual supervision through the generation of both hard and soft pseudo-labels, with the consistency in their predictions being refined by minimizing the classifier determinacy discrepancy. The teacher model further reinforces learning within this architecture through the imposition of consistency regularization constraints. To augment the generalization abilities of our approach, we employ a blend of data and model perturbation techniques. 
Comprehensive evaluations on the benchmark dataset of the PSFH Segmentation Grand Challenge at MICCAI 2023 demonstrate that our DSTCT framework outperforms 10 contemporary semi-supervised segmentation methods.", "title":"Intrapartum Ultrasound Image Segmentation of Pubic Symphysis and Fetal Head Using Dual Student-Teacher Framework with CNN-ViT Collaborative Learning", "authors":[ "Jiang, Jianmei", "Wang, Huijin", "Bai, Jieyun", "Long, Shun", "Chen, Shuangping", "Campello, Victor M.", "Lekadir, Karim" ], "id":"Conference", "arxiv_id":"2409.06928", "GitHub":[ "https:\/\/github.com\/jjm1589\/DSTCT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":608 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0134_paper.pdf", "bibtext":"@InProceedings{ Zha_Variational_MICCAI2024,\n author = { Zhang, Qi and Liu, Xiujian and Zhang, Heye and Xu, Chenchu and Yang, Guang and Yuan, Yixuan and Tan, Tao and Gao, Zhifan },\n title = { { Variational Field Constraint Learning for Degree of Coronary Artery Ischemia Assessment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Fractional flow reserve evaluation plays a crucial role in diagnosing ischemic coronary artery disease. Machine learning based fractional flow reserve evaluation has become the most important method due to its effectiveness and high computational efficiency. However, it still suffers from the lack of a proper description of the coronary artery fluid. This study presents a variational field constraint learning method for assessing fractional flow reserve from digital subtraction angiography images. Our method offers a promising approach by integrating governing equations and boundary conditions as unified constraints. Moreover, we also provide a multi-vessel neural network for predicting FFR in the coronary artery. By leveraging a holistic consideration of the fluid dynamics, our method achieves more accurate fractional flow reserve prediction compared to existing methods. Our VFCLM is evaluated over 8000 virtual subjects produced by 1D hemodynamic models and 180 in-vivo cases. 
VFCLM achieves the MAE of 1.17 mmHg and MAPE of 1.20% for quantification.", "title":"Variational Field Constraint Learning for Degree of Coronary Artery Ischemia Assessment", "authors":[ "Zhang, Qi", "Liu, Xiujian", "Zhang, Heye", "Xu, Chenchu", "Yang, Guang", "Yuan, Yixuan", "Tan, Tao", "Gao, Zhifan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":609 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1595_paper.pdf", "bibtext":"@InProceedings{ She_APSUSCT_MICCAI2024,\n author = { Sheng, Yi and Wang, Hanchen and Liu, Yipei and Yang, Junhuan and Jiang, Weiwen and Lin, Youzuo and Yang, Lei },\n title = { { APS-USCT: Ultrasound Computed Tomography on Sparse Data via AI-Physic Synergy } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Ultrasound computed tomography (USCT) is a promising technique that achieves superior medical imaging reconstruction resolution by fully leveraging waveform information, outperforming conventional ultrasound methods. Despite its advantages, high-quality USCT reconstruction relies on extensive data acquisition by a large number of transducers, leading to increased costs, computational demands, extended patient scanning times, and manufacturing complexities. To mitigate these issues, we propose a new USCT method called APS-USCT, which facilitates imaging with sparse data, substantially reducing dependence on high-cost dense data acquisition. Our APS-USCT method consists of two primary components: APS-wave and APS-FWI. The APS-wave component, an encoder-decoder system, preprocesses the waveform data, converting sparse data into dense waveforms to augment sample density prior to reconstruction. The APS-FWI component, utilizing the InversionNet, directly reconstructs the speed of sound (SOS) from the ultrasound waveform data. We further improve the model\u2019s performance by incorporating Squeeze-and-Excitation (SE) Blocks and source encoding techniques. Testing our method on a breast cancer dataset yielded promising results. It demonstrated outstanding performance with an average Structural Similarity Index (SSIM) of 0.8431. 
Notably, over 82% of samples achieved an SSIM above 0.8, with nearly 61% exceeding 0.85, highlighting the significant potential of our approach in improving USCT image reconstruction by efficiently utilizing sparse data.", "title":"APS-USCT: Ultrasound Computed Tomography on Sparse Data via AI-Physic Synergy", "authors":[ "Sheng, Yi", "Wang, Hanchen", "Liu, Yipei", "Yang, Junhuan", "Jiang, Weiwen", "Lin, Youzuo", "Yang, Lei" ], "id":"Conference", "arxiv_id":"2407.14564", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":610 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1358_paper.pdf", "bibtext":"@InProceedings{ Fan_Aligning_MICCAI2024,\n author = { Fang, Xiao and Lin, Yi and Zhang, Dong and Cheng, Kwang-Ting and Chen, Hao },\n title = { { Aligning Medical Images with General Knowledge from Large Language Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Pre-trained large vision-language models (VLMs) like CLIP have revolutionized visual representation learning using natural language as supervisions, and have demonstrated promising generalization ability. In this work, we propose ViP, a novel visual symptom-guided prompt learning framework for medical image analysis, which facilitates general knowledge transfer from CLIP. ViP consists of two key components: a visual symptom generator (VSG) and a dual-prompt network. Specifically, VSG aims to extract explicable visual symptoms from pre-trained large language models, while the dual-prompt network uses these visual symptoms to guide the training on two learnable prompt modules, i.e., context prompt and merge prompt, to better adapt our framework to medical image analysis via large VLMs. Extensive experimental results demonstrate that ViP can achieve competitive performance compared to the state-of-the-art methods on two challenging datasets. 
We provide the source code in the supplementary material.", "title":"Aligning Medical Images with General Knowledge from Large Language Models", "authors":[ "Fang, Xiao", "Lin, Yi", "Zhang, Dong", "Cheng, Kwang-Ting", "Chen, Hao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/xiaofang007\/ViP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":611 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1871_paper.pdf", "bibtext":"@InProceedings{ W\u00e5h_Explainable_MICCAI2024,\n author = { W\u00e5hlstrand Sk\u00e4rstr\u00f6m, Victor and Johansson, Lisa and Alv\u00e9n, Jennifer and Lorentzon, Mattias and H\u00e4ggstr\u00f6m, Ida },\n title = { { Explainable vertebral fracture analysis with uncertainty estimation using differentiable rule-based classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present a novel method for explainable vertebral fracture assessment (XVFA) in low-dose radiographs using deep neural networks, incorporating vertebra detection and keypoint localization with uncertainty estimates. We incorporate Genant\u2019s semi-quantitative criteria as a differentiable rule-based means of classifying both vertebra fracture grade and morphology. Unlike previous work, XVFA provides explainable classifications relatable to current clinical methodology, as well as uncertainty estimations, while at the same time surpassing state-of-the art methods with a vertebra-level sensitivity of 93% and end-to-end AUC of 97% in a challenging setting. Moreover, we compare intra-reader agreement with model uncertainty estimates, with model reliability on par with human annotators.", "title":"Explainable vertebral fracture analysis with uncertainty estimation using differentiable rule-based classification", "authors":[ "W\u00e5hlstrand Sk\u00e4rstr\u00f6m, Victor", "Johansson, Lisa", "Alv\u00e9n, Jennifer", "Lorentzon, Mattias", "H\u00e4ggstr\u00f6m, Ida" ], "id":"Conference", "arxiv_id":"2407.02926", "GitHub":[ "https:\/\/github.com\/waahlstrand\/xvfa" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":612 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3401_paper.pdf", "bibtext":"@InProceedings{ Dha_VideoCutMix_MICCAI2024,\n author = { Dhanakshirur, Rohan Raju and Tyagi, Mrinal and Baby, Britty and Suri, Ashish and Kalra, Prem and Arora, Chetan },\n title = { { VideoCutMix: Temporal Segmentation of Surgical Videos in Scarce Data Scenarios } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Temporal Action Segmentation (TAS) of a surgical video is an important first step for a variety of video analysis tasks such as skills assessment, surgical assistance and robotic surgeries. 
Limited data availability due to costly acquisition and annotation makes data augmentation imperative in such a scenario. However, extending directly from an image-augmentation strategy, most video augmentation techniques disturb the optical flow information in the process of generating an augmented sample. This creates difficulty in training. In this paper, we propose a simple-yet-efficient, flow-consistent, video-specific data augmentation technique suitable for TAS in scarce data conditions. This is the first augmentation for data-scarce TAS in surgical scenarios. We observe that TAS errors commonly occur at the action boundaries due to their scarcity in the datasets. Hence, we propose a novel strategy that generates pseudo-action boundaries without affecting optical flow elsewhere. Further, we also propose a sample-hardness-inspired curriculum where we train the model on easy samples first with only a single label observed in the temporal window. Additionally, we contribute the first-ever non-robotic Neuro-endoscopic Trainee Simulator (NETS) dataset for the task of TAS. We validate our approach on the proposed NETS, along with publicly available JIGSAWS and Cholec T-50 datasets. Compared to without the use of any data augmentation, we report an average improvement of 7.89%, 5.53%, 2.80%, respectively, on the 3 datasets in terms of edit score using our technique. The reported numbers are improvements averaged over 9 state-of-the-art (SOTA) action segmentation models using two different temporal feature extractors (I3D and VideoMAE). On average, the proposed technique outperforms the best-performing SOTA data augmentation technique by 3.94%, thus enabling us to set up a new SOTA for action segmentation in each of these datasets. https:\/\/aineurosurgery.github.io\/VideoCutMix", "title":"VideoCutMix: Temporal Segmentation of Surgical Videos in Scarce Data Scenarios", "authors":[ "Dhanakshirur, Rohan Raju", "Tyagi, Mrinal", "Baby, Britty", "Suri, Ashish", "Kalra, Prem", "Arora, Chetan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/AINeurosurgery\/VideoCutMix" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":613 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0900_paper.pdf", "bibtext":"@InProceedings{ Zhu_LowRank_MICCAI2024,\n author = { Zhu, Vince and Ji, Zhanghexuan and Guo, Dazhou and Wang, Puyang and Xia, Yingda and Lu, Le and Ye, Xianghua and Zhu, Wei and Jin, Dakai },\n title = { { Low-Rank Continual Pyramid Vision Transformer: Incrementally Segment Whole-Body Organs in CT with Light-Weighted Adaptation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep segmentation networks achieve high performance when trained on specific datasets. However, in clinical practice, it is often desirable that pretrained segmentation models can be dynamically extended to enable segmenting new organs without access to previous training datasets or without training from scratch. This would ensure a much more efficient model development and deployment paradigm accounting for the patient privacy and data storage issues. 
This clinically preferred process can be viewed as a continual semantic segmentation (CSS) problem. Previous CSS works would either experience catastrophic forgetting or lead to unaffordable memory costs as models expand. In this work, we propose a new continual whole-body organ segmentation model with light-weighted low-rank adaptation (LoRA). We first train and freeze a pyramid vision transformer (PVT) base segmentation model on the initial task, then continually add light-weighted trainable LoRA parameters to the frozen model for each new learning task. Through a holistic exploration of the architecture modification, we identify the three most important layers (i.e., patch-embedding, multi-head attention and feed forward layers) that are critical in adapting to the new segmentation tasks, while keeping the majority of the pre-trained parameters fixed. Our proposed model continually segments new organs without catastrophic forgetting while maintaining a low parameter increasing rate. Continually trained and tested on four datasets covering different body parts of a total of 121 organs, results show that our model achieves high segmentation accuracy, closely reaching the PVT and nnUNet upper bounds, and significantly outperforms other regularization-based CSS methods. When compared to the leading architecture-based CSS method, our model has a substantially lower parameter increasing rate (16.7\\% versus 96.7\\%) while achieving comparable performance.", "title":"Low-Rank Continual Pyramid Vision Transformer: Incrementally Segment Whole-Body Organs in CT with Light-Weighted Adaptation", "authors":[ "Zhu, Vince", "Ji, Zhanghexuan", "Guo, Dazhou", "Wang, Puyang", "Xia, Yingda", "Lu, Le", "Ye, Xianghua", "Zhu, Wei", "Jin, Dakai" ], "id":"Conference", "arxiv_id":"2410.04689", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":614 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1135_paper.pdf", "bibtext":"@InProceedings{ Liu_Auxiliary_MICCAI2024,\n author = { Liu, Yikang and Zhao, Lin and Chen, Eric Z. and Chen, Xiao and Chen, Terrence and Sun, Shanhui },\n title = { { Auxiliary Input in Training: Incorporating Catheter Features into Deep Learning Models for ECG-Free Dynamic Coronary Roadmapping } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Dynamic coronary roadmapping is a technology that overlays the vessel maps (the \u201croadmap\u201d) extracted from an offline image sequence of X-ray angiography onto a live stream of X-ray fluoroscopy in real-time. It aims to offer navigational guidance for interventional surgeries without the need for repeated contrast agent injections, thereby reducing the risks associated with radiation exposure and kidney failure. The precision of the roadmaps is contingent upon the accurate alignment of angiographic and fluoroscopic images based on their cardiac phases, as well as precise catheter tip tracking. 
The former ensures the selection of a roadmap that closely matches the vessel shape in the current frame, while the latter uses catheter tips as reference points to adjust for translational motion between the roadmap and the present vessel tree. Training deep learning models for both tasks is challenging and underexplored. However, incorporating catheter features into the models could offer substantial benefits, given humans heavily rely on catheters to complete the tasks. To this end, we introduce a simple but effective method, auxiliary input in training (AIT), and demonstrate that it enhances model performance across both tasks, outperforming baseline methods in knowledge incorporation and transfer learning.", "title":"Auxiliary Input in Training: Incorporating Catheter Features into Deep Learning Models for ECG-Free Dynamic Coronary Roadmapping", "authors":[ "Liu, Yikang", "Zhao, Lin", "Chen, Eric Z.", "Chen, Xiao", "Chen, Terrence", "Sun, Shanhui" ], "id":"Conference", "arxiv_id":"2408.15947", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":615 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2774_paper.pdf", "bibtext":"@InProceedings{ Wan_AClinicaloriented_MICCAI2024,\n author = { Wang, Yaqi and Chen, Leqi and Hou, Qingshan and Cao, Peng and Yang, Jinzhu and Liu, Xiaoli and Zaiane, Osmar R. },\n title = { { A Clinical-oriented Lightweight Network for High-resolution Medical Image Enhancement } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical images captured in less-than-optimal conditions may suffer from quality degradation, such as blur, artifacts, and low lighting, which potentially leads to misdiagnosis.\nUnfortunately, state-of-the-art medical image enhancement methods face challenges in both high-resolution image quality enhancement and local distinct anatomical structure preservation.\nTo address these issues, we propose a Clinical-oriented High-resolution Lightweight Medical Image Enhancement Network, termed CHLNet, which proficiently addresses high-resolution medical image enhancement, detailed pathological characteristics, and lightweight network design simultaneously.\nMore specifically, CHLNet comprises two main components: \n1) High-resolution Assisted Quality Enhancement Network for removing global low-quality factors in high-resolution images thus enhancing overall image quality;\n2) High-quality-semantic Guided Quality Enhancement Network for capturing semantic knowledge from high-quality images such that detailed structure preservation is enforced.\nMoreover, thanks to its lightweight design, CHLNet can be easily deployed on medical edge devices.\nExtensive experiments on three public medical image datasets demonstrate the effectiveness and superiority of CHLNet over the state-of-the-art.", "title":"A Clinical-oriented Lightweight Network for High-resolution Medical Image Enhancement", "authors":[ "Wang, Yaqi", "Chen, Leqi", "Hou, Qingshan", "Cao, Peng", "Yang, Jinzhu", "Liu, Xiaoli", "Zaiane, Osmar R." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":616 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0914_paper.pdf", "bibtext":"@InProceedings{ Kim_MaskFree_MICCAI2024,\n author = { Kim, Hyeon Bae and Ahn, Yong Hyun and Kim, Seong Tae },\n title = { { Mask-Free Neuron Concept Annotation for Interpreting Neural Networks in Medical Domain } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advancements in deep neural networks have shown promise in aiding disease diagnosis and medical decision-making. However, ensuring transparent decision-making processes of AI models in compliance with regulations requires a comprehensive understanding of the model\u2019s internal workings. However, previous methods heavily rely on expensive pixel-wise annotated datasets for interpreting the model, presenting a significant drawback in medical domains. In this paper, we propose a novel medical neuron concept annotation method, named Mask-free Medical Model Interpretation (MAMMI), addresses these challenges. By using a vision-language model, our method relaxes the need for pixel-level masks for neuron concept annotation. MAMMI achieves superior performance compared to other interpretation methods, demonstrating its efficacy in providing rich representations for neurons in medical image analysis. Our experiments on a model trained on NIH chest X-rays validate the effectiveness of MAMMI, showcasing its potential for transparent clinical decision-making in the medical domain. The code is available at https:\/\/github.com\/ailab-kyunghee\/MAMMI.", "title":"Mask-Free Neuron Concept Annotation for Interpreting Neural Networks in Medical Domain", "authors":[ "Kim, Hyeon Bae", "Ahn, Yong Hyun", "Kim, Seong Tae" ], "id":"Conference", "arxiv_id":"2407.11375", "GitHub":[ "https:\/\/github.com\/ailab-kyunghee\/MAMMI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":617 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0762_paper.pdf", "bibtext":"@InProceedings{ Shi_MaskEnhanced_MICCAI2024,\n author = { Shi, Hairong and Han, Songhao and Huang, Shaofei and Liao, Yue and Li, Guanbin and Kong, Xiangxing and Zhu, Hua and Wang, Xiaomu and Liu, Si },\n title = { { Mask-Enhanced Segment Anything Model for Tumor Lesion Semantic Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Tumor lesion segmentation on CT or MRI images plays a critical role in cancer diagnosis and treatment planning. 
Considering the inherent differences in tumor lesion segmentation data across various medical imaging modalities and equipment, integrating medical knowledge into the Segment Anything Model (SAM) presents promising capability due to its versatility and generalization potential. Recent studies have attempted to enhance SAM with medical expertise by pre-training on large-scale medical segmentation datasets. However, challenges still exist in 3D tumor lesion segmentation owing to tumor complexity and the imbalance in foreground and background regions. Therefore, we introduce Mask-Enhanced SAM (M-SAM), an innovative architecture tailored for 3D tumor lesion segmentation. We propose a novel Mask-Enhanced Adapter (MEA) within M-SAM that enriches the semantic information of medical images with positional data from coarse segmentation masks, facilitating the generation of more precise segmentation masks. Furthermore, an iterative refinement scheme is implemented in M-SAM to refine the segmentation masks progressively, leading to improved performance. Extensive experiments on seven tumor lesion segmentation datasets indicate that our M-SAM not only achieves high segmentation accuracy but also exhibits robust generalization. The code is available at https:\/\/github.com\/nanase1025\/M-SAM.", "title":"Mask-Enhanced Segment Anything Model for Tumor Lesion Semantic Segmentation", "authors":[ "Shi, Hairong", "Han, Songhao", "Huang, Shaofei", "Liao, Yue", "Li, Guanbin", "Kong, Xiangxing", "Zhu, Hua", "Wang, Xiaomu", "Liu, Si" ], "id":"Conference", "arxiv_id":"2403.05912", "GitHub":[ "https:\/\/github.com\/nanase1025\/M-SAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":618 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0308_paper.pdf", "bibtext":"@InProceedings{ Gao_CrossDimensional_MICCAI2024,\n author = { Gao, Fei and Wang, Siwen and Zhang, Fandong and Zhou, Hong-Yu and Wang, Yizhou and Wang, Churan and Yu, Gang and Yu, Yizhou },\n title = { { Cross-Dimensional Medical Self-Supervised Representation Learning Based on a Pseudo-3D Transformation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image analysis suffers from a shortage of data, whether annotated or not. This becomes even more pronounced when it comes to 3D medical images. Self-Supervised Learning (SSL) can partially ease this situation by utilizing unlabeled data. However, most existing SSL methods can only make use of data in a single dimensionality (e.g. 2D or 3D), and are incapable of enlarging the training dataset by using data with differing dimensionalities jointly. In this paper, we propose a new cross-dimensional SSL framework based on a pseudo-3D transformation (CDSSL-P3D), that can leverage both 2D and 3D data for joint pre-training. Specifically, we introduce an image transformation based on the im2col algorithm, which converts 2D images into a format consistent with 3D data. This transformation enables seamless integration of 2D and 3D data, and facilitates cross-dimensional self-supervised learning for 3D medical image analysis. 
We run extensive experiments on 13 downstream tasks, including 2D and 3D classification and segmentation. The results indicate that our CDSSL-P3D achieves superior performance, outperforming other advanced SSL methods.", "title":"Cross-Dimensional Medical Self-Supervised Representation Learning Based on a Pseudo-3D Transformation", "authors":[ "Gao, Fei", "Wang, Siwen", "Zhang, Fandong", "Zhou, Hong-Yu", "Wang, Yizhou", "Wang, Churan", "Yu, Gang", "Yu, Yizhou" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":619 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2686_paper.pdf", "bibtext":"@InProceedings{ Fad_EchoFM_MICCAI2024,\n author = { Fadnavis, Shreyas and Parmar, Chaitanya and Emaminejad, Nastaran and Ulloa Cerna, Alvaro and Malik, Areez and Selej, Mona and Mansi, Tommaso and Dunnmon, Preston and Yardibi, Tarik and Standish, Kristopher and Damasceno, Pablo F. },\n title = { { EchoFM: A View-Independent Echocardiogram Model for the Detection of Pulmonary Hypertension } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Transthoracic Echocardiography (TTE) is the most widely-used screening method for the detection of pulmonary hypertension (PH), a life-threatening cardiopulmonary disorder that requires accurate and timely detection for effective management. Automated PH risk detection from TTE can flag subtle indicators of PH that might be easily missed, thereby decreasing variability between operators and enhancing the positive predictive value of the screening test. Previous algorithms for assessing PH risk still rely on pre-identified, single TTE views which might ignore useful information contained in other recordings. Additionally, these methods focus on discerning PH from healthy controls, limiting their utility as a tool to differentiate PH from conditions that mimic its cardiovascular or respiratory presentation. To address these issues, we propose EchoFM, an architecture that combines self-supervised learning (SSL) and a transformer model for view-independent detection of PH from TTE. EchoFM 1) incorporates a powerful encoder for feature extraction from frames, 2) overcomes the need for explicit TTE view classification by merging features from all available views, 3) uses a transformer to attend to frames of interest without discarding others, and 4) is trained on a realistic clinical dataset which includes mimicking conditions as controls. Extensive experimentation demonstrates that EchoFM significantly improves PH risk detection over state-of-the-art Convolutional Neural Networks (CNNs).", "title":"EchoFM: A View-Independent Echocardiogram Model for the Detection of Pulmonary Hypertension", "authors":[ "Fadnavis, Shreyas", "Parmar, Chaitanya", "Emaminejad, Nastaran", "Ulloa Cerna, Alvaro", "Malik, Areez", "Selej, Mona", "Mansi, Tommaso", "Dunnmon, Preston", "Yardibi, Tarik", "Standish, Kristopher", "Damasceno, Pablo F." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":620 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0740_paper.pdf", "bibtext":"@InProceedings{ Doe_Selfsupervised_MICCAI2024,\n author = { Doerrich, Sebastian and Di Salvo, Francesco and Ledig, Christian },\n title = { { Self-supervised Vision Transformer are Scalable Generative Models for Domain Generalization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Despite notable advancements, the integration of deep learning (DL) techniques into impactful clinical applications, particularly in the realm of digital histopathology, has been hindered by challenges associated with achieving robust generalization across diverse imaging domains and characteristics. Traditional mitigation strategies in this field such as data augmentation and stain color normalization have proven insufficient in addressing this limitation, necessitating the exploration of alternative methodologies. To this end, we propose a novel generative method for domain generalization in histopathology images. Our method employs a generative, self-supervised Vision Transformer to dynamically extract characteristics of image patches and seamlessly infuse them into the original images, thereby creating novel, synthetic images with diverse attributes. By enriching the dataset with such synthesized images, we aim to enhance its holistic nature, facilitating improved generalization of DL models to unseen domains. Extensive experiments conducted on two distinct histopathology datasets demonstrate the effectiveness of our proposed approach, outperforming the state of the art substantially, on the Camelyon17-WILDS challenge dataset (+2%) and on a second epithelium-stroma dataset (+26%). Furthermore, we emphasize our method\u2019s ability to readily scale with increasingly available unlabeled data samples and more complex, higher parametric architectures. 
Source code is available at github.com\/sdoerrich97\/vits-are-generative-models.", "title":"Self-supervised Vision Transformer are Scalable Generative Models for Domain Generalization", "authors":[ "Doerrich, Sebastian", "Di Salvo, Francesco", "Ledig, Christian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/sdoerrich97\/vits-are-generative-models" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":621 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2796_paper.pdf", "bibtext":"@InProceedings{ Kim_Best_MICCAI2024,\n author = { Kim, SaeHyun and Choi, Yongjin and Na, Jincheol and Song, In-Seok and Lee, You-Sun and Hwang, Bo-Yeon and Lim, Ho-Kyung and Baek, Seung Jun },\n title = { { Best of Both Modalities: Fusing CBCT and Intraoral Scan Data into a Single Tooth Image } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cone-Beam CT (CBCT) and Intraoral Scan (IOS) are dental imaging techniques widely used for surgical planning and simulation. However, the spatial resolution of crowns is low in CBCT, and roots are not visible in IOS. We propose to take the best of both modalities: a seamless fusion of the crown from IOS and the root from CBCT into a single image in a watertight mesh, unlike prior works that compromise the resolution or simply overlay two images. The main challenges are aligning two images (registration) and fusing them (stitching) despite a large gap in the spatial resolution between two modalities. For effective registration, we propose centroid matching followed by coarse- and fine-registration based on the point-to-plane ICP method. Next, stitching of registered images is done to create a watertight mesh, for which we recursively interpolate the boundary points to seamlessly fill the gap between\nthe registered images. 
Experiments show that the proposed method incurs low registration error, and the fused images are of high quality and accuracy according to the evaluation by experts.", "title":"Best of Both Modalities: Fusing CBCT and Intraoral Scan Data into a Single Tooth Image", "authors":[ "Kim, SaeHyun", "Choi, Yongjin", "Na, Jincheol", "Song, In-Seok", "Lee, You-Sun", "Hwang, Bo-Yeon", "Lim, Ho-Kyung", "Baek, Seung Jun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":622 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3065_paper.pdf", "bibtext":"@InProceedings{ Liu_ACLNet_MICCAI2024,\n author = { Liu, Chao and Yu, Xueqing and Wang, Dingyu and Jiang, Tingting },\n title = { { ACLNet: A Deep Learning Model for ACL Rupture Classification Combined with Bone Morphology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Magnetic Resonance Imaging (MRI) is widely used in diagnosing anterior cruciate ligament (ACL) injuries due to its ability to provide detailed image data. However, existing deep learning approaches often overlook additional factors beyond the image itself. In this study, we aim to bridge this gap by exploring the relationship between ACL rupture and the bone morphology of the femur and tibia. Leveraging extensive clinical experience, we acknowledge the significance of this morphological data, which is not readily observed manually. To effectively incorporate this vital information, we introduce ACLNet, a novel model that combines the convolutional representation of MRI images with the transformer representation of bone morphological point clouds. This integration significantly enhances ACL injury predictions by leveraging both imaging and geometric data. Our methodology demonstrated an enhancement in diagnostic precision on the in-house dataset compared to image-only methods, elevating the accuracy from 87.59% to 92.57%. 
This strategy of utilizing implicitly relevant information to enhance performance holds promise for a variety of medical-related tasks.", "title":"ACLNet: A Deep Learning Model for ACL Rupture Classification Combined with Bone Morphology", "authors":[ "Liu, Chao", "Yu, Xueqing", "Wang, Dingyu", "Jiang, Tingting" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":623 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1596_paper.pdf", "bibtext":"@InProceedings{ Lai_From_MICCAI2024,\n author = { Lai, Yuxiang and Chen, Xiaoxi and Wang, Angtian and Yuille, Alan and Zhou, Zongwei },\n title = { { From Pixel to Cancer: Cellular Automata in Computed Tomography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"AI for cancer detection encounters the bottleneck of data scarcity, annotation difficulty, and low prevalence of early tumors. Tumor synthesis seeks to create artificial tumors in medical images, which can greatly diversify the data and annotations for AI training. However, current tumor synthesis approaches are not applicable across different organs due to their need for specific expertise and design. This paper establishes a set of generic rules to simulate tumor development. Each cell (pixel) is initially assigned a state between zero and ten to represent the tumor population, and a tumor can be developed based on three rules to describe the process of growth, invasion, and death. We apply these three generic rules to simulate tumor development\u2014from pixel to cancer\u2014using cellular automata. We then integrate the tumor state into the original computed tomography (CT) images to generate synthetic tumors across different organs. This tumor synthesis approach allows for sampling tumors at multiple stages and analyzing tumor-organ interaction. Clinically, a reader study involving three expert radiologists reveals that the synthetic tumors and their developing trajectories are convincingly realistic. Technically, we analyze and simulate tumor development at various stages using 9,262 raw, unlabeled CT images sourced from 68 hospitals worldwide. 
The performance in segmenting tumors in the liver, pancreas, and kidneys exceeds prevailing literature benchmarks, underlining the immense potential of tumor synthesis, especially for earlier cancer detection.", "title":"From Pixel to Cancer: Cellular Automata in Computed Tomography", "authors":[ "Lai, Yuxiang", "Chen, Xiaoxi", "Wang, Angtian", "Yuille, Alan", "Zhou, Zongwei" ], "id":"Conference", "arxiv_id":"2403.06459", "GitHub":[ "https:\/\/github.com\/MrGiovanni\/Pixel2Cancer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":624 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1171_paper.pdf", "bibtext":"@InProceedings{ Ngu_TrainingFree_MICCAI2024,\n author = { Nguyen, Van Phi and Luong Ha, Tri Nhan and Pham, Huy Hieu and Tran, Quoc Long },\n title = { { Training-Free Condition Video Diffusion Models for single frame Spatial-Semantic Echocardiogram Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Condition video diffusion models (CDM) have shown promising results for video synthesis, potentially enabling the generation of realistic echocardiograms to address the problem of data scarcity. However, current CDMs require a paired segmentation map and echocardiogram dataset. We present a new method called Free-Echo for generating realistic echocardiograms from a single end-diastolic segmentation map without additional training data. Our method is based on the 3D-Unet with Temporal Attention Layers model and is conditioned on the segmentation map using a training-free conditioning method based on SDEdit. We evaluate our model on two public echocardiogram datasets, CAMUS and EchoNet-Dynamic. We show that our model can generate plausible echocardiograms that are spatially aligned with the input segmentation map, achieving performance comparable to training-based CDMs. Our work opens up new possibilities for generating echocardiograms from a single segmentation map, which can be used for data augmentation, domain adaptation, and other applications in medical imaging. 
Our code is available at \\url{https:\/\/github.com\/gungui98\/echo-free}", "title":"Training-Free Condition Video Diffusion Models for single frame Spatial-Semantic Echocardiogram Synthesis", "authors":[ "Nguyen, Van Phi", "Luong Ha, Tri Nhan", "Pham, Huy Hieu", "Tran, Quoc Long" ], "id":"Conference", "arxiv_id":"2408.03035", "GitHub":[ "https:\/\/github.com\/gungui98\/echo-free" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":625 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1396_paper.pdf", "bibtext":"@InProceedings{ T\u00f6l_FUNAvg_MICCAI2024,\n author = { T\u00f6lle, Malte and Navarro, Fernando and Eble, Sebastian and Wolf, Ivo and Menze, Bjoern and Engelhardt, Sandy },\n title = { { FUNAvg: Federated Uncertainty Weighted Averaging for Datasets with Diverse Labels } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Federated learning is one popular paradigm to train a joint model in a distributed, privacy-preserving environment. But partial annotations pose an obstacle meaning that categories of labels are heterogeneous over clients. We propose to learn a joint backbone in a federated manner, while each site receives its own multi-label segmentation head. By using Bayesian techniques we observe that the different segmentation heads although only trained on the individual client\u2019s labels also learn information about the other labels not present at the respective site. This information is encoded in their predictive uncertainty. To obtain a final prediction we leverage this uncertainty and perform a weighted averaging of the ensemble of distributed segmentation heads, which allows us to segment \u201clocally unknown\u201d structures. With our method, which we refer to as FUNAvg, we are even on-par with the models trained and tested on the same dataset on average. 
The code is publicly available at https:\/\/github.com\/Cardio-AI\/FUNAvg.", "title":"FUNAvg: Federated Uncertainty Weighted Averaging for Datasets with Diverse Labels", "authors":[ "T\u00f6lle, Malte", "Navarro, Fernando", "Eble, Sebastian", "Wolf, Ivo", "Menze, Bjoern", "Engelhardt, Sandy" ], "id":"Conference", "arxiv_id":"2407.07488", "GitHub":[ "https:\/\/github.com\/Cardio-AI\/FUNAvg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":626 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1334_paper.pdf", "bibtext":"@InProceedings{ Lu_DiffVPS_MICCAI2024,\n author = { Lu, Yingling and Yang, Yijun and Xing, Zhaohu and Wang, Qiong and Zhu, Lei },\n title = { { Diff-VPS: Video Polyp Segmentation via a Multi-task Diffusion Network with Adversarial Temporal Reasoning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion Probabilistic Models have recently attracted significant attention in the community of computer vision due to their outstanding performance. However, while a substantial amount of diffusion-based research has focused on generative tasks, no work introduces diffusion models to advance the results of polyp segmentation in videos, which is frequently challenged by polyps\u2019 high camouflage and redundant temporal cues. In this paper, we present a novel diffusion-based network for video polyp segmentation task, dubbed as Diff-VPS. We incorporate multi-task supervision into diffusion models to promote the discrimination of diffusion models on pixel-by-pixel segmentation. This integrates the contextual high-level information achieved by the joint classification and detection tasks. To explore the temporal dependency, Temporal Reasoning Module (TRM) is devised via reasoning and reconstructing the target frame from the previous frames. We further equip TRM with a generative adversarial self-supervised strategy to produce more realistic frames and thus capture better dynamic cues. Extensive experiments are conducted on SUN-SEG, and the results indicate that our proposed Diff-VPS significantly achieves state-of-the-art performance. 
Code is available at https:\/\/github.com\/lydia-yllu\/Diff-VPS.", "title":"Diff-VPS: Video Polyp Segmentation via a Multi-task Diffusion Network with Adversarial Temporal Reasoning", "authors":[ "Lu, Yingling", "Yang, Yijun", "Xing, Zhaohu", "Wang, Qiong", "Zhu, Lei" ], "id":"Conference", "arxiv_id":"2409.07238", "GitHub":[ "https:\/\/github.com\/lydia-yllu\/Diff-VPS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":627 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3750_paper.pdf", "bibtext":"@InProceedings{ Yu_CPCLIP_MICCAI2024,\n author = { Yu, Xiaowei and Wu, Zihao and Zhang, Lu and Zhang, Jing and Lyu, Yanjun and Zhu, Dajiang },\n title = { { CP-CLIP: Core-Periphery Feature Alignment CLIP for Zero-Shot Medical Image Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multi-modality learning, exemplified by the language and\nimage pair pre-trained CLIP model, has demonstrated remarkable performance in enhancing zero-shot capabilities and has gained significant attention in the field. However, simply applying language-image pre-trained CLIP to medical image analysis encounters substantial domain shifts, resulting in significant performance degradation due to inherent disparities between natural (non-medical) and medical image characteristics. To address this challenge and uphold or even enhance CLIP\u2019s zero-shot\ncapability in medical image analysis, we develop a novel framework, Core-Periphery feature alignment for CLIP (CP-CLIP), tailored for handling medical images and corresponding clinical reports. Leveraging the foundational core-periphery organization that has been widely observed in brain networks, we augment CLIP by integrating a novel core-peripheryguided neural network. This auxiliary CP network not only aligns text and image features into a unified latent space more efficiently but also ensures the alignment is driven by domain-specific core information, e.g., in medical images and clinical reports. In this way, our approach effectively\nmitigates and further enhances CLIP\u2019s zero-shot performance in\nmedical image analysis. More importantly, our designed CP-CLIP exhibits excellent explanatory capability, enabling the automatic identification of critical regions in clinical analysis. 
Extensive experimentation and evaluation across five public datasets underscore the superiority of our CP-CLIP in zero-shot medical image prediction and critical area detection, showing its promising utility in multimodal feature alignment in current medical applications.", "title":"CP-CLIP: Core-Periphery Feature Alignment CLIP for Zero-Shot Medical Image Analysis", "authors":[ "Yu, Xiaowei", "Wu, Zihao", "Zhang, Lu", "Zhang, Jing", "Lyu, Yanjun", "Zhu, Dajiang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":628 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0234_paper.pdf", "bibtext":"@InProceedings{ Elb_FDSOS_MICCAI2024,\n author = { Elbatel, Marawan and Liu, Keyuan and Yang, Yanqi and Li, Xiaomeng },\n title = { { FD-SOS: Vision-Language Open-Set Detectors for Bone Fenestration and Dehiscence Detection from Intraoral Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate detection of bone fenestration and dehiscence (FD) is of utmost importance for effective treatment planning in dentistry. While cone-beam computed tomography (CBCT) is the gold standard for evaluating FD, it comes with limitations such as radiation exposure, limited accessibility, and higher cost compared to intraoral images. In intraoral images, dentists face challenges in the differential diagnosis of FD. This paper presents a novel and clinically significant application of FD detection solely from intraoral images, eliminating the need for CBCT. To achieve this, we propose FD-SOS, a novel open-set object detector for FD detection from intraoral images. FD-SOS has two novel components: conditional contrastive denoising (CCDN) and teeth-specific matching assignment (TMA). These modules enable FD-SOS to effectively leverage external dental semantics. Experimental results showed that our method outperformed existing detection methods and surpassed dental professionals by 35% recall under the same level of precision. 
Code is available at https:\/\/github.com\/xmed-lab\/FD-SOS.", "title":"FD-SOS: Vision-Language Open-Set Detectors for Bone Fenestration and Dehiscence Detection from Intraoral Images", "authors":[ "Elbatel, Marawan", "Liu, Keyuan", "Yang, Yanqi", "Li, Xiaomeng" ], "id":"Conference", "arxiv_id":"2407.09088", "GitHub":[ "https:\/\/github.com\/xmed-lab\/FD-SOS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":629 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3737_paper.pdf", "bibtext":"@InProceedings{ Zho_Gradient_MICCAI2024,\n author = { Zhou, Li and Wang, Dayang and Xu, Yongshun and Han, Shuo and Morovati, Bahareh and Fan, Shuyi and Yu, Hengyong },\n title = { { Gradient Guided Co-Retention Feature Pyramid Network for LDCT Image Denoising } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Low-dose computed tomography (LDCT) reduces the risks of radiation exposure but introduces noise and artifacts into CT images. The Feature Pyramid Network (FPN) is a conventional method for extracting multi-scale feature maps from input images. While upper layers in FPN enhance semantic value, details become generalized with reduced spatial resolution at each layer. In this work, we propose a Gradient Guided Co-Retention Feature Pyramid Network (G2CR-FPN) to address the connection between spatial resolution and semantic value beyond feature maps extracted from LDCT images. The network is structured with three essential paths: the bottom-up path utilizes the FPN structure to generate the hierarchical feature maps, representing multi-scale spatial resolutions and semantic values. Meanwhile, the lateral path serves as a skip connection between feature maps with the same spatial resolution, while also functioning feature maps as directional gradients. This path incorporates a gradient approximation, deriving edge-like enhanced feature maps in horizontal and vertical directions. The top-down path incorporates a proposed co-retention block that learns the high-level semantic value embedded in the preceding map of the path. This learning process is guided by the directional gradient approximation of the high-resolution feature map from the bottom-up path. 
Experimental results on the clinical CT images demonstrated the promising performance of the model.", "title":"Gradient Guided Co-Retention Feature Pyramid Network for LDCT Image Denoising", "authors":[ "Zhou, Li", "Wang, Dayang", "Xu, Yongshun", "Han, Shuo", "Morovati, Bahareh", "Fan, Shuyi", "Yu, Hengyong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":630 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2553_paper.pdf", "bibtext":"@InProceedings{ Jia_AnatomyAware_MICCAI2024,\n author = { Jiang, Hongchao and Miao, Chunyan },\n title = { { Anatomy-Aware Gating Network for Explainable Alzheimer\u2019s Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Structural Magnetic Resonance Imaging (sMRI) is a non-invasive technique to get a snapshot of the brain for diagnosing Alzheimer\u2019s disease. Existing works have used 3D brain images to train deep learning models for automated diagnosis, but these models are prone to exploit shortcut patterns that might not have clinical relevance. We propose an Anatomy-Aware Gating Network (AAGN) which explicitly extracts features from various anatomical regions using an anatomy-aware squeeze-and-excite operation. By conditioning on the anatomy-aware features, AAGN dynamically selects the regions where atrophy is most discriminative. Once trained, we can interpret the regions selected by AAGN as explicit explanations for a given prediction. Our experiments show that AAGN selects regions well-aligned with medical literature and outperforms various convolutional and attention architectures. The code is available at \\url{https:\/\/github.com\/hongcha0\/aagn}.", "title":"Anatomy-Aware Gating Network for Explainable Alzheimer\u2019s Disease Diagnosis", "authors":[ "Jiang, Hongchao", "Miao, Chunyan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/hongcha0\/aagn" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":631 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1315_paper.pdf", "bibtext":"@InProceedings{ Ber_Diffusion_MICCAI2024,\n author = { Bercea, Cosmin I. and Wiestler, Benedikt and Rueckert, Daniel and Schnabel, Julia A. },\n title = { { Diffusion Models with Implicit Guidance for Medical Anomaly Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion models have advanced unsupervised anomaly detection by improving the transformation of pathological images into pseudo-healthy equivalents. Nonetheless, standard approaches may compromise critical information during pathology removal, leading to restorations that do not align with unaffected regions in the original scans. 
Such discrepancies can inadvertently increase false positive rates and reduce specificity, complicating radiological evaluations. This paper introduces Temporal Harmonization for Optimal Restoration (THOR), which refines the reverse diffusion process by integrating implicit guidance through intermediate masks. THOR aims to preserve the integrity of healthy tissue details in reconstructed images, ensuring fidelity to the original scan in areas unaffected by pathology. Comparative evaluations reveal that THOR surpasses existing diffusion-based methods in retaining detail and precision in image restoration and detecting and segmenting anomalies in brain MRIs and wrist X-rays. Code: https:\/\/github.com\/compai-lab\/2024-miccai-bercea-thor.git.", "title":"Diffusion Models with Implicit Guidance for Medical Anomaly Detection", "authors":[ "Bercea, Cosmin I.", "Wiestler, Benedikt", "Rueckert, Daniel", "Schnabel, Julia A." ], "id":"Conference", "arxiv_id":"2403.08464", "GitHub":[ "https:\/\/github.com\/compai-lab\/2024-miccai-bercea-thor.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":632 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1242_paper.pdf", "bibtext":"@InProceedings{ Yan_TSBP_MICCAI2024,\n author = { Yang, Tingting and Xiao, Liang and Zhang, Yizhe },\n title = { { TSBP: Improving Object Detection in Histology Images via Test-time Self-guided Bounding-box Propagation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper, we propose a Test-time Self-guided Bounding-box Propagation (TSBP) method, leveraging Earth Mover\u2019s Distance (EMD) to enhance object detection in histology images. TSBP utilizes bounding boxes with high confidence to influence those with low confidence, leveraging visual similarities between them. This propagation mechanism enables bounding boxes to be selected in a controllable, explainable, and robust manner, which surpasses the effectiveness of using simple thresholds and uncertainty calibration methods. Importantly, TSBP does not necessitate additional labeled samples for model training or parameter estimation, unlike calibration methods. We conduct experiments on gland detection and cell detection tasks in histology images. The results show that our proposed TSBP significantly improves detection outcomes when working in conjunction with state-of-the-art deep learning-based detection networks. 
Compared to other methods such as uncertainty calibration, TSBP yields more robust and accurate object detection predictions while using no additional labeled samples.", "title":"TSBP: Improving Object Detection in Histology Images via Test-time Self-guided Bounding-box Propagation", "authors":[ "Yang, Tingting", "Xiao, Liang", "Zhang, Yizhe" ], "id":"Conference", "arxiv_id":"2409.16678", "GitHub":[ "https:\/\/github.com\/jwhgdeu\/TSBP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":633 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4180_paper.pdf", "bibtext":"@InProceedings{ Reh_ALargescale_MICCAI2024,\n author = { Rehman, Abdul and Meraj, Talha and Minhas, Aiman Mahmood and Imran, Ayisha and Ali, Mohsen and Sultani, Waqas },\n title = { { A Large-scale Multi Domain Leukemia Dataset for the White Blood Cells Detection with Morphological Attributes for Explainability } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Earlier diagnosis of Leukemia can save thousands of lives annually.\nThe prognosis of leukemia is challenging without the morphological information of White Blood Cells (WBC) and relies on the accessibility of expensive microscopes and the availability of hematologists to analyze Peripheral Blood Samples (PBS). Deep Learning based methods can be employed to assist hematologists. However, these algorithms require a large amount of labeled data, which is not readily available. To overcome this limitation, we have acquired a realistic, generalized, and {large} dataset. To collect this comprehensive dataset for real-world applications, two microscopes from two different cost spectrum\u2019s (high-cost: HCM and low-cost: LCM) are used for dataset capturing at three magnifications (100x, 40x,10x) through different sensors (high-end camera for HCM, middle-level camera for LCM and mobile-phone\u2019s camera for both). The high-sensor camera is 47 times more expensive than the middle-level camera and HCM is 17 times more expensive than LCM. In this collection, using HCM at high resolution (100x), experienced hematologists annotated 10.3k WBC of 14 types including artifacts, having 55k morphological labels (Cell Size, Nuclear Chromatin, Nuclear Shape, etc) from 2.4k images of several PBS leukemia patients. Later on, these annotations are transferred to other two magnifications of HCM, and three magnifications of LCM, and on each camera captured images. Along with this proposed LeukemiaAttri dataset, we provide baselines over multiple object detectors and Unsupervised Domain Adaptation (UDA) strategies, along with morphological information-based attribute prediction. 
The dataset is available at: https:\/\/tinyurl.com\/586vaw3j", "title":"A Large-scale Multi Domain Leukemia Dataset for the White Blood Cells Detection with Morphological Attributes for Explainability", "authors":[ "Rehman, Abdul", "Meraj, Talha", "Minhas, Aiman Mahmood", "Imran, Ayisha", "Ali, Mohsen", "Sultani, Waqas" ], "id":"Conference", "arxiv_id":"2405.10803", "GitHub":[ "https:\/\/github.com\/intelligentMachines-ITU\/Blood-Cancer-Dataset" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":634 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2187_paper.pdf", "bibtext":"@InProceedings{ Li_FewShot_MICCAI2024,\n author = { Li, Yi and Zhang, Qixiang and Xiang, Tianqi and Lin, Yiqun and Zhang, Qingling and Li, Xiaomeng },\n title = { { Few-Shot Lymph Node Metastasis Classification Meets High Performance on Whole Slide Images via the Informative Non-Parametric Classifier } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Lymph node metastasis (LNM) classification is crucial for breast cancer staging. However, the process of identifying tiny metastatic cancer cells within gigapixel whole slide image (WSI) is tedious, time-consuming, and expensive. To address this challenge, computational pathology methods have emerged, particularly multiple instance learning (MIL) based on deep learning. But these methods require massive amounts of data, while existing few-shot methods severely compromise accuracy for data saving. To simultaneously achieve few-shot and high performance LNM classification, we propose the informative non-parametric classifier (INC). It maintains informative local patch features divided by mask label, then innovatively utilizes non-parametric similarity to classify LNM, avoiding overfitting on a few WSI examples. Experimental results demonstrate that the proposed INC outperforms existing SoTA methods across various settings, with less data and labeling cost. For the same setting, we achieve remarkable AUC improvements over 29.07% on CAMELYON16. 
Additionally, our approach demonstrates excellent generalizability across multiple medical centers and corrupted WSIs, even surpassing many-shot SoTA methods over 7.55% on CAMELYON16-C.", "title":"Few-Shot Lymph Node Metastasis Classification Meets High Performance on Whole Slide Images via the Informative Non-Parametric Classifier", "authors":[ "Li, Yi", "Zhang, Qixiang", "Xiang, Tianqi", "Lin, Yiqun", "Zhang, Qingling", "Li, Xiaomeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":635 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0929_paper.pdf", "bibtext":"@InProceedings{ Wei_Prompting_MICCAI2024,\n author = { Wei, Zhikai and Dong, Wenhui and Zhou, Peilin and Gu, Yuliang and Zhao, Zhou and Xu, Yongchao },\n title = { { Prompting Segment Anything Model with Domain-Adaptive Prototype for Generalizable Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning based methods often suffer from performance degradation caused by domain shift. In recent years, many sophisticated network structures have been designed to tackle this problem. However, the advent of large model trained on massive data, with its exceptional segmentation capability, introduces a new perspective for solving medical segmentation problems. In this paper, we propose a novel Domain-Adaptive Prompt framework for fine-tuning the Segment Anything Model (termed as DAPSAM) to address single-source domain generalization (SDG) in segmenting medical images. DAPSAM not only utilizes a more generalization-friendly adapter to fine-tune the large model, but also introduces a self-learning prototype-based prompt generator to enhance model\u2019s generalization ability. Specifically, we first merge the important low-level features into intermediate features before feeding to each adapter, followed by an attention filter to remove redundant information. This yields more robust image embeddings. Then, we propose using a learnable memory bank to construct domain-adaptive prototypes for prompt generation, helping to achieve generalizable medical image segmentation. Extensive experimental results demonstrate that our DAPSAM achieves state-of-the-art performance on two SDG medical image segmentation tasks with different modalities. 
The code is available at https:\/\/github.com\/wkklavis\/DAPSAM.", "title":"Prompting Segment Anything Model with Domain-Adaptive Prototype for Generalizable Medical Image Segmentation", "authors":[ "Wei, Zhikai", "Dong, Wenhui", "Zhou, Peilin", "Gu, Yuliang", "Zhao, Zhou", "Xu, Yongchao" ], "id":"Conference", "arxiv_id":"2409.12522", "GitHub":[ "https:\/\/github.com\/wkklavis\/DAPSAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":636 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0908_paper.pdf", "bibtext":"@InProceedings{ Kan_BGFYOLO_MICCAI2024,\n author = { Kang, Ming and Ting, Chee-Ming and Ting, Fung Fung and Phan, Rapha\u00ebl C.-W. },\n title = { { BGF-YOLO: Enhanced YOLOv8 with Multiscale Attentional Feature Fusion for Brain Tumor Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"You Only Look Once (YOLO)-based object detectors have shown remarkable accuracy for automated brain tumor detection. In this paper, we develop a novel BGF-YOLO architecture by incorporating Bi-level Routing Attention (BRA), Generalized feature pyramid networks (GFPN), and Fourth detecting head into YOLOv8. BGF-YOLO contains an attention mechanism to focus more on important features, and feature pyramid networks to enrich feature representation by merging high-level semantic features with spatial details. Furthermore, we investigate the effect of different attention mechanisms and feature fusions, detection head architectures on brain tumor detection accuracy. Experimental results show that BGF-YOLO gives a 4.7% absolute increase of mAP50 compared to YOLOv8x, and achieves state-of-the-art on the brain tumor detection dataset Br35H. The code is available at https:\/\/github.com\/mkang315\/BGF-YOLO.", "title":"BGF-YOLO: Enhanced YOLOv8 with Multiscale Attentional Feature Fusion for Brain Tumor Detection", "authors":[ "Kang, Ming", "Ting, Chee-Ming", "Ting, Fung Fung", "Phan, Rapha\u00ebl C.-W." ], "id":"Conference", "arxiv_id":"2309.12585", "GitHub":[ "https:\/\/github.com\/mkang315\/BGF-YOLO" ], "paper_page":"https:\/\/huggingface.co\/papers\/2309.12585", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":4, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":637 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1184_paper.pdf", "bibtext":"@InProceedings{ Yu_I2Net_MICCAI2024,\n author = { Yu, Jiahao and Duan, Fan and Chen, Li },\n title = { { I2Net: Exploiting Misaligned Contexts Orthogonally with Implicit-Parameterized Implicit Functions for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent medical image segmentation methods have started to apply implicit neural representation (INR) to segmentation networks to learn continuous data representations. 
Though effective, they suffer from inferior performance. In this paper, we delve into the inferiority and discover that the underlying reason behind it is the indiscriminate treatment for context fusion that fails to properly exploit misaligned contexts. Therefore, we propose a novel Implicit-parameterized INR Network (I2Net), which dynamically generates the model parameters of INRs to adapt to different misaligned contexts. We further propose novel gate shaping and learner orthogonalization to induce I2Net to handle misaligned contexts in orthogonal ways. We conduct extensive experiments on two medical datasets, i.e. Glas and Synapse, and a generic dataset, i.e. Cityscapes, to show the superiority of our I2Net. Code: https:\/\/github.com\/ChineseYjh\/I2Net.", "title":"I2Net: Exploiting Misaligned Contexts Orthogonally with Implicit-Parameterized Implicit Functions for Medical Image Segmentation", "authors":[ "Yu, Jiahao", "Duan, Fan", "Chen, Li" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ChineseYjh\/I2Net" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":638 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0791_paper.pdf", "bibtext":"@InProceedings{ Li_EndoSparse_MICCAI2024,\n author = { Li, Chenxin and Feng, Brandon Y. and Liu, Yifan and Liu, Hengyu and Wang, Cheng and Yu, Weihao and Yuan, Yixuan },\n title = { { EndoSparse: Real-Time Sparse View Synthesis of Endoscopic Scenes using Gaussian Splatting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"3D reconstruction of biological tissues from a collection of endoscopic images is a key to unlock various important downstream surgical applications with 3D capabilities. Existing methods employ various advanced neural rendering techniques for photorealistic view synthesis, but they often struggle to recover accurate 3D representations when only sparse observations are available, which is often the case in real-world clinical scenarios. To tackle this sparsity challenge, we propose a framework leveraging the prior knowledge from multiple foundation models during the reconstruction process. Experimental results indicate that our proposed strategy significantly improves the geometric and appearance quality under challenging sparse-view conditions, including using only three views. In rigorous benchmarking experiments against the state-of-the-art methods, EndoSparse achieves superior results in terms of accurate geometry, realistic appearance, and rendering efficiency, confirming the robustness to the sparse-view limitation in endoscopic reconstruction. EndoSparse signifies a steady step towards the practical deployment of neural 3D reconstruction in real-world clinical scenarios. 
Project page: https:\/\/endo-sparse.github.io\/.", "title":"EndoSparse: Real-Time Sparse View Synthesis of Endoscopic Scenes using Gaussian Splatting", "authors":[ "Li, Chenxin", "Feng, Brandon Y.", "Liu, Yifan", "Liu, Hengyu", "Wang, Cheng", "Yu, Weihao", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"2407.01029", "GitHub":[ "https:\/\/endo-sparse.github.io\/\nhttps:\/\/github.com\/CUHK-AIM-Group\/EndoSparse" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":639 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1220_paper.pdf", "bibtext":"@InProceedings{ Yan_Surgformer_MICCAI2024,\n author = { Yang, Shu and Luo, Luyang and Wang, Qiong and Chen, Hao },\n title = { { Surgformer: Surgical Transformer with Hierarchical Temporal Attention for Surgical Phase Recognition } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Existing state-of-the-art methods for surgical phase recognition either rely on the extraction of spatial-temporal features at a short-range temporal resolution or adopt the sequential extraction of the spatial and temporal features across the entire temporal resolution. However, these methods have limitations in modeling spatial-temporal dependency and addressing spatial-temporal redundancy: 1) These methods fail to effectively model spatial-temporal dependency, due to the lack of long-range information or joint spatial-temporal modeling. 2) These methods utilize dense spatial features across the entire temporal resolution, resulting in significant spatial-temporal redundancy. In this paper, we propose the Surgical Transformer (Surgformer) to address the issues of spatial-temporal modeling and redundancy in an end-to-end manner, which employs divided spatial-temporal attention and takes a limited set of sparse frames as input. Moreover, we propose a novel Hierarchical Temporal Attention (HTA) to capture both global and local information within varied temporal resolutions from a target frame-centric perspective. Distinct from conventional temporal attention that primarily emphasizes dense long-range similarity, HTA not only captures long-term information but also considers local latent consistency among informative frames. HTA then employs pyramid feature aggregation to effectively utilize temporal information across diverse temporal resolutions, thereby enhancing the overall temporal representation. Extensive experiments on two challenging benchmark datasets verify that our proposed Surgformer performs favorably against the state-of-the-art methods. 
The code is released at https:\/\/github.com\/isyangshu\/Surgformer.", "title":"Surgformer: Surgical Transformer with Hierarchical Temporal Attention for Surgical Phase Recognition", "authors":[ "Yang, Shu", "Luo, Luyang", "Wang, Qiong", "Chen, Hao" ], "id":"Conference", "arxiv_id":"2408.03867", "GitHub":[ "https:\/\/github.com\/isyangshu\/Surgformer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":640 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0658_paper.pdf", "bibtext":"@InProceedings{ Hag_Deep_MICCAI2024,\n author = { Hagag, Amr and Gomaa, Ahmed and Kornek, Dominik and Maier, Andreas and Fietkau, Rainer and Bert, Christoph and Huang, Yixing and Putz, Florian },\n title = { { Deep Learning for Cancer Prognosis Prediction Using Portrait Photos by StyleGAN Embedding } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Survival prediction for cancer patients is critical for optimal treatment selection and patient management. Current patient survival prediction methods typically extract survival information from patients\u2019 clinical record data or biological and imaging data. In practice, experienced clinicians can have a preliminary assessment of patients\u2019 health status based on patients\u2019 observable physical appearances, which are mainly facial features. However, such assessment is highly subjective. In this work, the efficacy of objectively capturing and using prognostic information contained in conventional portrait photographs using deep learning for survival prediction purposes is investigated for the first time. A pre-trained StyleGAN2 model is fine-tuned on a custom dataset of our cancer patients\u2019 photos to empower its generator with generative ability suitable for patients\u2019 photos. The StyleGAN2 is then used to embed the photographs to its highly expressive latent space. Utilizing state-of- the-art survival analysis models and StyleGAN\u2019s latent space embeddings, this approach predicts the overall survival for single as well as pancancer, achieving a C-index of 0.680 in a pan-cancer analysis, showcasing the prognostic value embedded in simple 2D facial images. In addition, thanks to StyleGAN\u2019s interpretable latent space, our survival prediction model can be validated for relying on essential facial features, eliminating any biases from extraneous information like clothing or background. 
Moreover, our approach provides a novel health attribute obtained from StyleGAN\u2019s extracted features, allowing the modification of face photographs to either a healthier or more severe illness appearance, which has significant prognostic value for patient care and societal perception, underscoring its potentially important clinical value.", "title":"Deep Learning for Cancer Prognosis Prediction Using Portrait Photos by StyleGAN Embedding", "authors":[ "Hagag, Amr", "Gomaa, Ahmed", "Kornek, Dominik", "Maier, Andreas", "Fietkau, Rainer", "Bert, Christoph", "Huang, Yixing", "Putz, Florian" ], "id":"Conference", "arxiv_id":"2306.14596", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":641 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2724_paper.pdf", "bibtext":"@InProceedings{ Kun_Training_MICCAI2024,\n author = { Kunanbayev, Kassymzhomart and Shen, Vyacheslav and Kim, Dae-Shik },\n title = { { Training ViT with Limited Data for Alzheimer\u2019s Disease Classification: an Empirical Study } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper, we conduct an extensive exploration of a Vision Transformer (ViT) in brain medical imaging in a low-data regime. The recent and ongoing success of Vision Transformers in computer vision has motivated its development in medical imaging, but trumping it with inductive bias in a brain imaging domain imposes a real challenge since collecting and accessing large amounts of brain medical data is a labor-intensive process. Motivated by the need to bridge this data gap, we embarked on an investigation into alternative training strategies ranging from self-supervised pre-training to knowledge distillation to determine the feasibility of producing a practical plain ViT model. To this end, we conducted an intensive set of experiments using a small amount of labeled 3D brain MRI data for the task of Alzheimer\u2019s disease classification. As a result, our experiments yield an optimal training recipe, thus paving the way for Vision Transformer-based models for other low-data medical imaging applications. 
To bolster further development, we release our assortment of pre-trained models for a variety of MRI-related applications: https:\/\/github.com\/qasymjomart\/ViT_recipe_for_AD", "title":"Training ViT with Limited Data for Alzheimer\u2019s Disease Classification: an Empirical Study", "authors":[ "Kunanbayev, Kassymzhomart", "Shen, Vyacheslav", "Kim, Dae-Shik" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/qasymjomart\/ViT_recipe_for_AD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":642 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0311_paper.pdf", "bibtext":"@InProceedings{ \u00d6zs_ORacle_MICCAI2024,\n author = { \u00d6zsoy, Ege and Pellegrini, Chantal and Keicher, Matthias and Navab, Nassir },\n title = { { ORacle: Large Vision-Language Models for Knowledge-Guided Holistic OR Domain Modeling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Every day, countless surgeries are performed worldwide, each within the distinct settings of operating rooms (ORs) that vary not only in their setups but also in the personnel, tools, and equipment used. This inherent diversity poses a substantial challenge for achieving a holistic understanding of the OR, as it requires models to generalize beyond their initial training datasets. To reduce this gap, we introduce ORacle, an advanced vision-language model designed for holistic OR domain modeling, which incorporates multi-view and temporal capabilities and can leverage external knowledge during inference, enabling it to adapt to previously unseen surgical scenarios. This capability is further enhanced by our novel data augmentation framework, which significantly diversifies the training dataset, ensuring ORacle\u2019s proficiency in applying the provided knowledge effectively. In rigorous testing, in scene graph generation, and downstream tasks on the 4D-OR dataset, ORacle not only demonstrates state-of-the-art performance but does so requiring less data than existing models. Furthermore, its adaptability is displayed through its ability to interpret unseen views, actions, and appearances of tools and equipment. This demonstrates ORacle\u2019s potential to significantly enhance the scalability and affordability of OR domain modeling and opens a pathway for future advancements in surgical data science. 
We will release our code and data upon acceptance.", "title":"ORacle: Large Vision-Language Models for Knowledge-Guided Holistic OR Domain Modeling", "authors":[ "\u00d6zsoy, Ege", "Pellegrini, Chantal", "Keicher, Matthias", "Navab, Nassir" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/egeozsoy\/ORacle" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":643 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1998_paper.pdf", "bibtext":"@InProceedings{ Ala_Jumpstarting_MICCAI2024,\n author = { Alapatt, Deepak and Murali, Aditya and Srivastav, Vinkle and AI4SafeChole Consortium and Mascagni, Pietro and Padoy, Nicolas },\n title = { { Jumpstarting Surgical Computer Vision } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Consensus amongst researchers and industry points to a lack of large, representative annotated datasets as the biggest obstacle to progress in the field of surgical data science. Advances in Self-Supervised Learning (SSL) represent a solution, reducing the dependence on large labeled datasets by providing task-agnostic initializations. However, the robustness of current self-supervised learning methods to domain shifts remains unclear, limiting our understanding of its utility for leveraging diverse sources of surgical data. Shifting the focus from methods to data, we demonstrate that the downstream value of SSL-based initializations is intricately intertwined with the composition of pre-training datasets. These results underscore an important gap that needs to be filled as we scale self-supervised approaches toward building general-purpose \u201cfoundation models\u201d that enable diverse use-cases within the surgical domain. Through several stages of controlled experimentation, we develop recommendations for pretraining dataset composition evidenced through over 300 experiments spanning 20 pre-training datasets, 9 surgical procedures, 7 centers (hospitals), 3 labeled-data settings, 3 downstream tasks, and multiple runs. Using the approaches here described, we outperform state-of-the-art pre-trainings on two public benchmarks for phase recognition: up to 2.2% on Cholec80 and 5.1% on AutoLaparo.", "title":"Jumpstarting Surgical Computer Vision", "authors":[ "Alapatt, Deepak", "Murali, Aditya", "Srivastav, Vinkle", "AI4SafeChole Consortium", "Mascagni, Pietro", "Padoy, Nicolas" ], "id":"Conference", "arxiv_id":"2312.05968", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":644 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3398_paper.pdf", "bibtext":"@InProceedings{ Szc_Let_MICCAI2024,\n author = { Szczepa\u0144ski, Tomasz and Grzeszczyk, Michal K. 
and P\u0142otka, Szymon and Adamowicz, Arleta and Fudalej, Piotr and Korzeniowski, Przemys\u0142aw and Trzcin\u0301ski, Tomasz and Sitek, Arkadiusz },\n title = { { Let Me DeCode You: Decoder Conditioning with Tabular Data } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Training deep neural networks for 3D segmentation tasks can be challenging, often requiring efficient and effective strategies to improve model performance. In this study, we introduce a novel approach, DeCode, that utilizes label-derived features for model conditioning to support the decoder in the reconstruction process dynamically, aiming to enhance the efficiency of the training process. DeCode focuses on improving 3D segmentation performance through the incorporation of conditioning embedding with learned numerical representation of 3D-label shape features. Specifically, we develop an approach, where conditioning is applied during the training phase to guide the network toward robust segmentation. When labels are not available during inference, our model infers the necessary conditioning embedding directly from the input data, thanks to a feed-forward network learned during the training phase. This approach is tested using synthetic data and cone-beam computed tomography (CBCT) images of teeth. For CBCT, three datasets are used: one publicly available and two in-house. Our results show that DeCode significantly outperforms traditional, unconditioned models in terms of generalization to unseen data, achieving higher accuracy at a reduced computational cost. This work represents the first of its kind to explore conditioning strategies in 3D data segmentation, offering a novel and more efficient method for leveraging annotated data. Our code, pre-trained models are publicly available at https:\/\/github.com\/SanoScience\/DeCode.", "title":"Let Me DeCode You: Decoder Conditioning with Tabular Data", "authors":[ "Szczepa\u0144ski, Tomasz", "Grzeszczyk, Michal K.", "P\u0142otka, Szymon", "Adamowicz, Arleta", "Fudalej, Piotr", "Korzeniowski, Przemys\u0142aw", "Trzcin\u0301ski, Tomasz", "Sitek, Arkadiusz" ], "id":"Conference", "arxiv_id":"2407.09437", "GitHub":[ "https:\/\/github.com\/SanoScience\/DeCode" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":645 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1520_paper.pdf", "bibtext":"@InProceedings{ Wan_Structurepreserving_MICCAI2024,\n author = { Wang, Shuxian and Paruchuri, Akshay and Zhang, Zhaoxi and McGill, Sarah and Sengupta, Roni },\n title = { { Structure-preserving Image Translation for Depth Estimation in Colonoscopy } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Monocular depth estimation in colonoscopy video aims to overcome the unusual lighting properties of the colonoscopic environment. 
One of the major challenges in this area is the domain gap between annotated but unrealistic synthetic data and unannotated but realistic clinical data. Previous attempts to bridge this domain gap directly target the depth estimation task itself. We propose a general pipeline of structure-preserving synthetic-to-real (sim2real) image translation (producing a modified version of the input image) to retain depth geometry through the translation process. This allows us to generate large quantities of realistic-looking synthetic images for supervised depth estimation with improved generalization to the clinical domain. We also propose a dataset of hand-picked sequences from clinical colonoscopies to improve the image translation process. We demonstrate the simultaneous realism of the translated images and preservation of depth maps via the performance of downstream depth estimation on various datasets.", "title":"Structure-preserving Image Translation for Depth Estimation in Colonoscopy", "authors":[ "Wang, Shuxian", "Paruchuri, Akshay", "Zhang, Zhaoxi", "McGill, Sarah", "Sengupta, Roni" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "github.com\/sherry97\/struct-preserving-cyclegan" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":646 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0950_paper.pdf", "bibtext":"@InProceedings{ Xu_Dynamic_MICCAI2024,\n author = { Xu, Fangqiang and Tu, Wenxuan and Feng, Fan and Gunawardhana, Malitha and Yang, Jiayuan and Gu, Yun and Zhao, Jichao },\n title = { { Dynamic Position Transformation and Boundary Refinement Network for Left Atrial Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Left atrial (LA) segmentation is a crucial technique for irregular heartbeat (i.e., atrial fibrillation) diagnosis. Most current methods for LA segmentation strictly assume that the input data is acquired using object-oriented center cropping, while this assumption may not always hold in practice due to the high cost of manual object annotation. Random cropping is a straightforward data pre-processing approach. However, it 1) introduces significant irregularities and incompleteness in the input data and 2) disrupts the coherence and continuity of object boundary regions. To tackle these issues, we propose a novel Dynamic Position transformation and Boundary refinement Network (DPBNet). The core idea is to dynamically adjust the relative position of irregular targets to construct their contextual relationships and prioritize difficult boundary pixels to enhance foreground-background distinction. Specifically, we design a shuffle-then-reorder attention module to adjust the position of disrupted objects in the latent space using dynamic generation ratios, such that the vital dependencies among these random cropping targets could be well captured and preserved. Moreover, to improve the accuracy of boundary localization, we introduce a dual fine-grained boundary loss with scenario-adaptive weights to handle the ambiguity of the dual boundary at a fine-grained level, promoting the clarity and continuity of the obtained results. 
Extensive experimental results on benchmark dataset have demonstrated that DPBNet consistently outperforms existing state-of-the-art methods.", "title":"Dynamic Position Transformation and Boundary Refinement Network for Left Atrial Segmentation", "authors":[ "Xu, Fangqiang", "Tu, Wenxuan", "Feng, Fan", "Gunawardhana, Malitha", "Yang, Jiayuan", "Gu, Yun", "Zhao, Jichao" ], "id":"Conference", "arxiv_id":"2407.05505", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":647 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0527_paper.pdf", "bibtext":"@InProceedings{ Tia_uniGradICON_MICCAI2024,\n author = { Tian, Lin and Greer, Hastings and Kwitt, Roland and Vialard, Fran\u00e7ois-Xavier and San Jos\u00e9 Est\u00e9par, Ra\u00fal and Bouix, Sylvain and Rushmore, Richard and Niethammer, Marc },\n title = { { uniGradICON: A Foundation Model for Medical Image Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Conventional medical image registration approaches directly optimize over the parameters of a transformation model. These approaches have been highly successful and are used generically for registrations of different anatomical regions. Recent deep registration networks are incredibly fast and accurate but are only trained for specific tasks. Hence, they are no longer generic registration approaches. We therefore propose uniGradICON, a first step toward a foundation model for registration providing 1) great performance across multiple datasets which is not feasible for current learning-based registration methods, 2) zero-shot capabilities for new registration tasks suitable for different acquisitions, anatomical regions, and modalities compared to the training dataset, and 3) a strong initialization for finetuning on out-of-distribution registration tasks. UniGradICON unifies the speed and accuracy benefits of learning-based registration algorithms with the generic applicability of conventional non-deep-learning approaches. We extensively trained and evaluated uniGradICON on twelve different public datasets. 
Our code and weights are available at https:\/\/github.com\/uncbiag\/uniGradICON.", "title":"uniGradICON: A Foundation Model for Medical Image Registration", "authors":[ "Tian, Lin", "Greer, Hastings", "Kwitt, Roland", "Vialard, Fran\u00e7ois-Xavier", "San Jos\u00e9 Est\u00e9par, Ra\u00fal", "Bouix, Sylvain", "Rushmore, Richard", "Niethammer, Marc" ], "id":"Conference", "arxiv_id":"2403.05780", "GitHub":[ "https:\/\/github.com\/uncbiag\/uniGradICON" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":648 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0845_paper.pdf", "bibtext":"@InProceedings{ Xia_Enhancing_MICCAI2024,\n author = { Xia, Yuexuan and Ma, Benteng and Dou, Qi and Xia, Yong },\n title = { { Enhancing Federated Learning Performance Fairness via Collaboration Graph-based Reinforcement Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Federated learning has recently developed into a pivotal distributed learning paradigm, wherein a server aggregates numerous client-trained models into a global model without accessing any client data directly. It is acknowledged that statistical heterogeneity in client local data affects the pace of global model convergence, but it is often underestimated that this heterogeneity also engenders a biased global model with notable variance in accuracy across clients. Contextually, the prevalent solutions entail modifying the optimization objective. However, these solutions often overlook implicit relationships, such as the pairwise distances of site data distributions, which make the optimization among client models pairwise exclusive or synergistic. Such optimization conflicts compromise the efficacy of earlier methods, leading to performance imbalance or even negative transfer. To tackle this issue, we propose a novel aggregation strategy called Collaboration Graph-based Reinforcement Learning (FedGraphRL). By deploying a reinforcement learning (RL) agent equipped with a multi-layer adaptive graph convolutional network (AGCN) on the server-side, we can learn a collaboration graph from client state vectors, revealing the collaborative relationships among clients during optimization. Guided by an introduced reward that balances fairness and performance, the agent allocates aggregation weights, thereby promoting automated decision-making and improvements in fairness. 
The experimental results on two real-world multi-center medical datasets suggest the effectiveness and superiority of the proposed FedGraphRL.", "title":"Enhancing Federated Learning Performance Fairness via Collaboration Graph-based Reinforcement Learning", "authors":[ "Xia, Yuexuan", "Ma, Benteng", "Dou, Qi", "Xia, Yong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":649 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1851_paper.pdf", "bibtext":"@InProceedings{ Liu_LMUNet_MICCAI2024,\n author = { Liu, Anglin and Jia, Dengqiang and Sun, Kaicong and Meng, Runqi and Zhao, Meixin and Jiang, Yongluo and Dong, Zhijian and Gao, Yaozong and Shen, Dinggang },\n title = { { LM-UNet: Whole-body PET-CT Lesion Segmentation with Dual-Modality-based Annotations Driven by Latent Mamba U-Net } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"PET-CT integrates metabolic information with anatomical structures and plays a vital role in revealing systemic metabolic abnormalities. Automatic segmentation of lesions from whole-body PET-CT could assist diagnostic workflow, support quantitative diagnosis, and increase the detection rate of microscopic lesions. However, automatic lesion segmentation from PET-CT images still faces challenges due to 1) limitations of single-modality-based annotations in public PET-CT datasets, 2) difficulty in distinguishing between pathological and physiological high metabolism, and 3) lack of effective utilization of CT\u2019s structural information. To address these challenges, we propose a threefold strategy. First, we develop an in-house dataset with dual-modality-based annotations to improve clinical applicability; Second, we introduce a model called Latent Mamba U-Net (LM-UNet), to more accurately identify lesions by modeling long-range dependencies; Third, we employ an anatomical enhancement module to better integrate tissue structural features. Experimental results show that our comprehensive framework achieves improved performance over the state-of-the-art methods on both public and in-house datasets, further advancing the development of AI-assisted clinical applications. 
Our code is available at https:\/\/github.com\/Joey-S-Liu\/LM-UNet.", "title":"LM-UNet: Whole-body PET-CT Lesion Segmentation with Dual-Modality-based Annotations Driven by Latent Mamba U-Net", "authors":[ "Liu, Anglin", "Jia, Dengqiang", "Sun, Kaicong", "Meng, Runqi", "Zhao, Meixin", "Jiang, Yongluo", "Dong, Zhijian", "Gao, Yaozong", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Joey-S-Liu\/LM-UNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":650 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3697_paper.pdf", "bibtext":"@InProceedings{ Guo_FairQuantize_MICCAI2024,\n author = { Guo, Yuanbo and Jia, Zhenge and Hu, Jingtong and Shi, Yiyu },\n title = { { FairQuantize: Achieving Fairness Through Weight Quantization for Dermatological Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent studies have demonstrated that deep learning (DL) models for medical image classification may exhibit biases toward certain demographic attributes such as race, gender, and age. Existing bias mitigation strategies often require sensitive attributes for inference, which may not always be available, or achieve moderate fairness enhancement at the cost of significant accuracy decline. To overcome these obstacles, we propose FairQuantize, a novel approach that ensures fairness by quantizing model weights. We reveal that quantization can be used not as a tool for model compression but as a means to improve model fairness. It is based on the observation that different weights in a model impact performance on various demographic groups differently. FairQuantize selectively quantizes certain weights to enhance fairness while only marginally impacting accuracy. In addition, resulting quantized models can work without sensitive attributes as input. 
Experimental results on two skin disease datasets demonstrate that FairQuantize can significantly enhance fairness among sensitive attributes while minimizing the impact on overall performance.", "title":"FairQuantize: Achieving Fairness Through Weight Quantization for Dermatological Disease Diagnosis", "authors":[ "Guo, Yuanbo", "Jia, Zhenge", "Hu, Jingtong", "Shi, Yiyu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/guoyb17\/FairQuantize" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":651 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3154_paper.pdf", "bibtext":"@InProceedings{ San_FissionFusion_MICCAI2024,\n author = { Sanjeev, Santosh and Zhaksylyk, Nuren and Almakky, Ibrahim and Hashmi, Anees Ur Rehman and Qazi, Mohammad Areeb and Yaqub, Mohammad },\n title = { { FissionFusion: Fast Geometric Generation and Hierarchical Souping for Medical Image Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"The scarcity of well-annotated medical datasets requires leveraging transfer learning from broader datasets like ImageNet or pre-trained models like CLIP. Model soups averages multiple fine-tuned models aiming to improve performance on In-Domain (ID) tasks and enhance robustness on Out-of-Distribution (OOD) datasets. However, applying these methods to the medical imaging domain faces challenges and results in suboptimal performance. This is primarily due to differences in error surface characteristics that stem from data complexities such as heterogeneity, domain shift, class imbalance, and distributional shifts between training and testing phases. To address this issue, we propose a hierarchical merging approach that involves local and global aggregation of models at various levels based on models\u2019 hyperparameter configurations. Furthermore, to alleviate the need for training a large number of models in the hyperparameter search, we introduce a computationally efficient method using a cyclical learning rate scheduler to produce multiple models for aggregation in the weight space. Our method demonstrates significant improvements over the model souping approach across multiple datasets (around 6% gain in HAM10000 and CheXpert datasets) while maintaining low computational costs for model generation and selection. Moreover, we achieve better results on OOD datasets compared to model soups. 
Code is available at https:\/\/github.com\/BioMedIA-MBZUAI\/FissionFusion.", "title":"FissionFusion: Fast Geometric Generation and Hierarchical Souping for Medical Image Analysis", "authors":[ "Sanjeev, Santosh", "Zhaksylyk, Nuren", "Almakky, Ibrahim", "Hashmi, Anees Ur Rehman", "Qazi, Mohammad Areeb", "Yaqub, Mohammad" ], "id":"Conference", "arxiv_id":"2403.13341", "GitHub":[ "https:\/\/github.com\/BioMedIA-MBZUAI\/FissionFusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":652 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0132_paper.pdf", "bibtext":"@InProceedings{ Wan_3DGPS_MICCAI2024,\n author = { Wang, Ce and Huang, Xiaoyu and Kong, Yaqing and Li, Qian and Hao, You and Zhou, Xiang },\n title = { { 3DGPS: A 3D Differentiable-Gaussian-based Planning Strategy for Liver Tumor Cryoablation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Effective preoperative planning is crucial for successful cryoablation of liver tumors. However, conventional planning methods rely heavily on clinicians\u2019 experience, which may not always lead to an optimal solution due to the intricate 3D anatomical structures and clinical constraints. Lots of planning methods have been proposed, but lack interactivity between multiple probes and are difficult to adapt to diverse clinical scenarios. To bridge the gap, we present a novel 3D Differentiable-Gaussian-based Planning Strategy (3DGPS) for cryoablation of liver tumor considering both the probe interactivity and several clinical constraints. Especially, the problem is formulated to search the minimal circumscribed tumor ablation region, which is generated by multiple 3D ellipsoids, each from one cryoprobe. These ellipsoids are parameterized by the differentiable Gaussians and optimized mainly within two stages, fitting and circumscribing, with formulated clinical constraints in an end-to-end manner. 
Quantitative and qualitative experiments on LiTS and in-house datasets verify the effectiveness of 3DGPS.", "title":"3DGPS: A 3D Differentiable-Gaussian-based Planning Strategy for Liver Tumor Cryoablation", "authors":[ "Wang, Ce", "Huang, Xiaoyu", "Kong, Yaqing", "Li, Qian", "Hao, You", "Zhou, Xiang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":653 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0893_paper.pdf", "bibtext":"@InProceedings{ Zhu_SRECNN_MICCAI2024,\n author = { Zhu, Yuliang and Cheng, Jing and Cui, Zhuo-Xu and Ren, Jianfeng and Wang, Chengbo and Liang, Dong },\n title = { { SRE-CNN: A Spatiotemporal Rotation-Equivariant CNN for Cardiac Cine MR Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Dynamic MR images possess various transformation symmetries, including the rotation symmetry of local features within the image and along the temporal dimension. Utilizing these symmetries as prior knowledge can facilitate dynamic MR imaging with high spatiotemporal resolution. Equivariant CNN is an effective tool to leverage the symmetry priors. However, current equivariant CNN methods fail to fully exploit these symmetry priors in dynamic MR imaging. In this work, we propose a novel framework of Spatiotemporal Rotation-Equivariant CNN (SRE-CNN), spanning from the underlying high-precision filter design to the construction of the temporal-equivariant convolutional module and imaging model, to fully harness the rotation symmetries inherent in dynamic MR images. The temporal-equivariant convolutional module enables exploitation of the rotation symmetries in both spatial and temporal dimensions, while the high-precision convolutional filter, based on a parametrization strategy, enhances the utilization of rotation symmetry of local features to improve the reconstruction of detailed anatomical structures. 
Experiments conducted on highly undersampled dynamic cardiac cine data (up to 20X) have demonstrated the superior performance of our proposed approach, both quantitatively and qualitatively.", "title":"SRE-CNN: A Spatiotemporal Rotation-Equivariant CNN for Cardiac Cine MR Imaging", "authors":[ "Zhu, Yuliang", "Cheng, Jing", "Cui, Zhuo-Xu", "Ren, Jianfeng", "Wang, Chengbo", "Liang, Dong" ], "id":"Conference", "arxiv_id":"2409.08537", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":654 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3289_paper.pdf", "bibtext":"@InProceedings{ Mor_MultiVarNet_MICCAI2024,\n author = { Morel, Louis-Oscar and Muzammel, Muhammad and Vin\u00e7on, Nathan and Derang\u00e8re, Valentin and Ladoire, Sylvain and Rittscher, Jens },\n title = { { MultiVarNet - Predicting Tumour Mutational status at the Protein Level } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning research in medical image analysis demonstrated the capability of predicting molecular information, including tumour mutational status, from cell and tissue morphology extracted from standard histology images. While this capability holds the promise of revolutionising pathology, it is of critical importance to go beyond gene-level mutations and develop methodologies capable of predicting precise variant mutations. Only then will it be possible to support important clinical applications, including specific targeted therapies.", "title":"MultiVarNet - Predicting Tumour Mutational status at the Protein Level", "authors":[ "Morel, Louis-Oscar", "Muzammel, Muhammad", "Vin\u00e7on, Nathan", "Derang\u00e8re, Valentin", "Ladoire, Sylvain", "Rittscher, Jens" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":655 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3611_paper.pdf", "bibtext":"@InProceedings{ Has_Myocardial_MICCAI2024,\n author = { Hasny, Marta and Demirel, Omer B. and Amyar, Amine and Faghihroohi, Shahrooz and Nezafat, Reza },\n title = { { Myocardial Scar Enhancement in LGE Cardiac MRI using Localized Diffusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Late gadolinium enhancement (LGE) imaging is considered the gold-standard technique for evaluating myocardial scar\/fibrosis. In LGE, an inversion pulse is played before imaging to create a contrast between healthy and scarred regions. However, several factors can impact the contrast quality, impacting diagnostic interpretation. Furthermore, the quantification of scar burden is highly dependent on image quality. 
Deep learning-based automated segmentation algorithms often fail when there is no clear boundary between healthy and scarred tissue. This study sought to develop a generative model for improving the contrast of healthy-scarred myocardium in LGE. We propose a localized conditional diffusion model, in which only a region-of-interest (ROI), in this case heart, is subjected to the noising process, adapting the learning process to the local nature of our proposed enhancement. The scar-enhanced images, used as training targets, are generated via tissue-specific gamma correction. A segmentation model is trained and used to extract the heart regions. The inference speed is improved by leveraging partial diffusion, applying noise only up to an intermediate step. Furthermore, utilizing the stochastic nature of diffusion models, repeated inference leads to improved scar enhancement of ambiguous regions. The proposed algorithm was evaluated using LGE images collected in 929 patients with hypertrophic cardiomyopathy, in a multi-center, multi-vendor study. Our results show visual improvements of scar-healthy myocardium contrast. To further demonstrate the strength of our method, we evaluate our performance against various image enhancement models where the proposed approach shows higher contrast enhancement. The code is available at: https:\/\/github.com\/HMS-CardiacMR\/Scar_enhancement.", "title":"Myocardial Scar Enhancement in LGE Cardiac MRI using Localized Diffusion", "authors":[ "Hasny, Marta", "Demirel, Omer B.", "Amyar, Amine", "Faghihroohi, Shahrooz", "Nezafat, Reza" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/HMS-CardiacMR\/Scar_enhancement" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":656 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3444_paper.pdf", "bibtext":"@InProceedings{ Wu_Efficient_MICCAI2024,\n author = { Wu, Chenwei and Restrepo, David and Shuai, Zitao and Liu, Zhongming and Shen, Liyue },\n title = { { Efficient In-Context Medical Segmentation with Meta-driven Visual Prompt Selection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"In-context learning with Large Vision Models presents a promising avenue in medical image segmentation by reducing the reliance on extensive labeling. However, the performance of Large Vision Models in in-context learning highly depends on the choices of visual prompts and suffers from domain shifts. While existing works leveraging Large Vision Models for medical tasks have focused mainly on model-centric approaches like fine-tuning, we study an orthogonal data-centric perspective on how to select good visual prompts to facilitate generalization to the medical domain. In this work, we propose a label-efficient in-context medical segmentation method enabled by introducing a novel Meta-driven Visual Prompt Selection (MVPS) mechanism, where a prompt retriever obtained from a meta-learning framework actively selects the optimal images as prompts to promote model performance and generalizability. 
Evaluated on 8 datasets and 4 tasks across 3 medical imaging modalities, our proposed approach demonstrates consistent gains over existing methods under different scenarios, introducing both computational and label efficiency. Finally, we show that our approach is a flexible, finetuning-free module that could be easily plugged into different backbones and combined with other model-centric approaches.", "title":"Efficient In-Context Medical Segmentation with Meta-driven Visual Prompt Selection", "authors":[ "Wu, Chenwei", "Restrepo, David", "Shuai, Zitao", "Liu, Zhongming", "Shen, Liyue" ], "id":"Conference", "arxiv_id":"2407.11188", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":657 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1325_paper.pdf", "bibtext":"@InProceedings{ Ger_Interpretablebydesign_MICCAI2024,\n author = { Gervelmeyer, Julius and M\u00fcller, Sarah and Djoumessi, Kerol and Merle, David and Clark, Simon J. and Koch, Lisa and Berens, Philipp },\n title = { { Interpretable-by-design Deep Survival Analysis for Disease Progression Modeling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the elderly, degenerative diseases often develop differently over time for individual patients. For optimal treatment, physicians and patients would like to know how much time is left for them until symptoms reach a certain stage. However, compared to simple disease detection tasks, disease progression modeling has received much less attention. In addition, most existing models are black-box models which provide little insight into the mechanisms driving the prediction. Here, we introduce an interpretable-by-design survival model to predict the progression of age-related macular degeneration (AMD) from fundus images. Our model not only achieves state-of-the-art prediction performance compared to black-box models but also provides a sparse map of local evidence of AMD progression for individual patients. Our evidence map faithfully reflects the decision-making process of the model in contrast to widely used post-hoc saliency methods. Furthermore, we show that the identified regions mostly align with established clinical AMD progression markers. We believe that our method may help to inform treatment decisions and may lead to better insights into imaging biomarkers indicative of disease progression. 
The project\u2019s code is available at github.com\/berenslab\/interpretable-deep-survival-analysis.", "title":"Interpretable-by-design Deep Survival Analysis for Disease Progression Modeling", "authors":[ "Gervelmeyer, Julius", "M\u00fcller, Sarah", "Djoumessi, Kerol", "Merle, David", "Clark, Simon J.", "Koch, Lisa", "Berens, Philipp" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/berenslab\/interpretable-deep-survival-analysis" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":658 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1946_paper.pdf", "bibtext":"@InProceedings{ S\u00f8r_Spatiotemporal_MICCAI2024,\n author = { S\u00f8rensen, Kristine and Diez, Paula and Margeta, Jan and El Youssef, Yasmin and Pham, Michael and Pedersen, Jonas Jalili and K\u00fchl, Tobias and de Backer, Ole and Kofoed, Klaus and Camara, Oscar and Paulsen, Rasmus },\n title = { { Spatio-temporal neural distance fields for conditional generative modeling of the heart } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"The rhythmic pumping motion of the heart stands as a cornerstone in life, as it circulates blood to the entire human body through a series of carefully timed contractions of the individual chambers. Changes in the size, shape and movement of the chambers can be important markers for cardiac disease and modeling this in relation to clinical demography or disease is therefore of interest. Existing methods for spatio-temporal modeling of the human heart require shape correspondence over time or suffer from large memory requirements, making it difficult to use for complex anatomies. We introduce a novel conditional generative model, where the shape and movement is modeled implicitly in the form of a spatio-temporal neural distance field and conditioned on clinical demography. The model is based on an auto-decoder architecture and aims to disentangle the individual variations from that related to the clinical demography. It is tested on the left atrium (including the left atrial appendage), where it outperforms current state-of-the-art methods for anatomical sequence completion and generates synthetic sequences that realistically mimics the shape and motion of the real left atrium. 
In practice, this means we can infer functional measurements from a static image, generate synthetic populations with specified demography or disease, and investigate how non-imaging clinical data affect the shape and motion of cardiac anatomies.", "title":"Spatio-temporal neural distance fields for conditional generative modeling of the heart", "authors":[ "S\u00f8rensen, Kristine", "Diez, Paula", "Margeta, Jan", "El Youssef, Yasmin", "Pham, Michael", "Pedersen, Jonas Jalili", "K\u00fchl, Tobias", "de Backer, Ole", "Kofoed, Klaus", "Camara, Oscar", "Paulsen, Rasmus" ], "id":"Conference", "arxiv_id":"2407.10663", "GitHub":[ "https:\/\/github.com\/kristineaajuhl\/spatio_temporal_generative_cardiac_model.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":659 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1005_paper.pdf", "bibtext":"@InProceedings{ El_AUniversal_MICCAI2024,\n author = { El Amrani, Nafie and Cao, Dongliang and Bernard, Florian },\n title = { { A Universal and Flexible Framework for Unsupervised Statistical Shape Model Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"We introduce a novel unsupervised deep learning framework for constructing statistical shape models (SSMs). Although unsupervised learning-based 3D shape matching methods have made a major leap forward in recent years, the correspondence quality of existing methods does not meet the demanding requirements necessary for the construction of SSMs of complex anatomical structures. We address this shortcoming by proposing a novel deformation coherency loss to effectively enforce smooth and high-quality correspondences during neural network training. We demonstrate that our framework outperforms existing methods in creating high-quality SSMs by conducting extensive experiments on five challenging datasets with varying anatomical complexities. Our proposed method sets the new state of the art in unsupervised SSM learning, offering a universal solution that is both flexible and reliable. 
Our source code is publicly available at https:\/\/github.com\/NafieAmrani\/FUSS.", "title":"A Universal and Flexible Framework for Unsupervised Statistical Shape Model Learning", "authors":[ "El Amrani, Nafie", "Cao, Dongliang", "Bernard, Florian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/NafieAmrani\/FUSS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":660 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0338_paper.pdf", "bibtext":"@InProceedings{ Par_Towards_MICCAI2024,\n author = { Park, Jihun and Hong, Jiuk and Yoon, Jihun and Park, Bokyung and Choi, Min-Kook and Jung, Heechul },\n title = { { Towards Precise Pose Estimation in Robotic Surgery: Introducing Occlusion-Aware Loss } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate pose estimation of surgical instruments is crucial for analyzing robotic surgery videos using computer vision techniques. However, the scarcity of suitable public datasets poses a challenge in this regard. To address this issue, we have developed a new private dataset extracted from real gastric cancer surgery videos. The primary objective of our research is to develop a more sophisticated pose estimation algorithm for surgical instruments using this private dataset. Additionally, we introduce a novel loss function aimed at enhancing the accuracy of pose estimation, with a specific emphasis on minimizing root mean squared error. Leveraging the YOLOv8 model, our approach significantly outperforms existing methods and state-of-the-art techniques, thanks to the enhanced occlusion-aware loss function. These findings hold promise for improving the precision and safety of robotic-assisted surgeries.", "title":"Towards Precise Pose Estimation in Robotic Surgery: Introducing Occlusion-Aware Loss", "authors":[ "Park, Jihun", "Hong, Jiuk", "Yoon, Jihun", "Park, Bokyung", "Choi, Min-Kook", "Jung, Heechul" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":661 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3700_paper.pdf", "bibtext":"@InProceedings{ Mat_Ocular_MICCAI2024,\n author = { Matinfar, Sasan and Dehghani, Shervin and Sommersperger, Michael and Faridpooya, Koorosh and Fairhurst, Merle and Navab, Nassir },\n title = { { Ocular Stethoscope: Auditory Support for Retinal Membrane Peeling } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"The peeling of an epiretinal membrane (ERM) is a complex procedure wherein a membrane, only a few micrometers thick, that develops on the retinal surface is delicately removed using microsurgical forceps. 
Insights regarding small gaps between the ERM and the retinal tissue are valuable for surgical decision-making, particularly in determining a suitable location to initiate the peeling. Depth-resolved imaging of the retina provided by intraoperative Optical Coherence Tomography (iOCT) enables visualization of this gap and supports decision-making.\nThe common presentation of iOCT images during surgery in juxtaposition with the microscope view, however, requires surgeons to move their gaze from the surgical site, affecting proprioception and cognitive load. In this work, we introduce an alternative method utilizing auditory feedback as a sensory channel, designed to intuitively enhance the perception of ERM elevations. Our approach establishes an innovative unsupervised mapping between real-time OCT A-scans and the parameters of an acoustic model. This acoustic model conveys the physical characteristics of tissue structure through distinctive sound textures, at microtemporal resolution. Our experiments show that even subtle ERM elevations can be sonified. Expert clinician feedback confirms the impact of our method and an initial user study with 15 participants demonstrates the potential to perceive the gap between the ERM and the retinal tissue exclusively through auditory cues.", "title":"Ocular Stethoscope: Auditory Support for Retinal Membrane Peeling", "authors":[ "Matinfar, Sasan", "Dehghani, Shervin", "Sommersperger, Michael", "Faridpooya, Koorosh", "Fairhurst, Merle", "Navab, Nassir" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":662 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3212_paper.pdf", "bibtext":"@InProceedings{ Al_IMMoCo_MICCAI2024,\n author = { Al-Haj Hemidi, Ziad and Weihsbach, Christian and Heinrich, Mattias P. },\n title = { { IM-MoCo: Self-supervised MRI Motion Correction using Motion-Guided Implicit Neural Representations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Motion artifacts in Magnetic Resonance Imaging (MRI) arise due to relatively long acquisition times and can compromise the clinical utility of acquired images. Traditional motion correction methods often fail to address severe motion, leading to distorted and unreliable results. Deep Learning (DL) alleviated such pitfalls through generalization at the cost of vanishing structures and hallucinations, making it challenging to apply in the medical field where hallucinated structures can tremendously impact the diagnostic outcome. In this work, we present an instance-wise motion correction pipeline that leverages motion-guided Implicit Neural Representations (INRs) to mitigate the impact of motion artifacts while retaining anatomical structure. Our method is evaluated using the NYU fastMRI dataset with different degrees of simulated motion severity. For the correction alone, we can improve over state-of-the-art image reconstruction methods by +5% SSIM, +5 dB PSNR, and +14% HaarPSI. 
Clinical relevance is demonstrated by a subsequent experiment, where our method improves classification outcomes by at least +1.5 accuracy percentage points compared to motion-corrupted images.", "title":"IM-MoCo: Self-supervised MRI Motion Correction using Motion-Guided Implicit Neural Representations", "authors":[ "Al-Haj Hemidi, Ziad", "Weihsbach, Christian", "Heinrich, Mattias P." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/multimodallearning\/MICCAI24_IMMoCo.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":663 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1551_paper.pdf", "bibtext":"@InProceedings{ Ola_SpeChrOmics_MICCAI2024,\n author = { Oladokun, Ajibola S. and Malila, Bessie and Campello, Victor M. and Shey, Muki and Mutsvangwa, Tinashe E. M. },\n title = { { SpeChrOmics: A Biomarker Characterization Framework for Medical Hyperspectral Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"We propose SpeChrOmics, a characterization framework for generating potential biomarkers of pathologies from hyperspectral images of body tissue. We test our model using a novel clinical application \u2013 hyperspectral imaging for the diagnosis of latent tuberculosis infection (LTBI). This is a neglected disease state predominantly prevalent in sub-Saharan Africa. Our model identified water, deoxyhemoglobin, and pheomelanin as potential chromophore biomarkers for LTBI with mean cross validation accuracy of 96%. Our framework can potentially be used for pathology characterization in other medical applications.", "title":"SpeChrOmics: A Biomarker Characterization Framework for Medical Hyperspectral Imaging", "authors":[ "Oladokun, Ajibola S.", "Malila, Bessie", "Campello, Victor M.", "Shey, Muki", "Mutsvangwa, Tinashe E. M." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":664 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3085_paper.pdf", "bibtext":"@InProceedings{ Mou_Evaluating_MICCAI2024,\n author = { Mouheb, Kaouther and Elbatel, Marawan and Klein, Stefan and Bron, Esther E. },\n title = { { Evaluating the Fairness of Neural Collapse in Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning has achieved impressive performance across various medical imaging tasks. However, its inherent bias against specific groups hinders its clinical applicability in equitable healthcare systems. A recently discovered phenomenon, Neural Collapse (NC), has shown potential in improving the generalization of state-of-the-art deep learning models. Nonetheless, its implications on bias in medical imaging remain unexplored. 
Our study investigates deep learning fairness through the lens of NC. We analyze the training dynamics of models as they approach NC when training using biased datasets, and examine the subsequent impact on test performance, specifically focusing on label bias. We find that biased training initially results in different NC configurations across subgroups, before converging to a final NC solution by memorizing all data samples. Through extensive experiments on three medical imaging datasets\u2014PAPILA, HAM10000, and CheXpert\u2014we find that in biased settings, NC can lead to a significant drop in F1 score across all subgroups. Our code is available at https:\/\/gitlab.com\/radiology\/neuro\/neural-collapse-fairness.", "title":"Evaluating the Fairness of Neural Collapse in Medical Image Classification", "authors":[ "Mouheb, Kaouther", "Elbatel, Marawan", "Klein, Stefan", "Bron, Esther E." ], "id":"Conference", "arxiv_id":"2407.05843", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":665 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1022_paper.pdf", "bibtext":"@InProceedings{ Liu_PAMIL_MICCAI2024,\n author = { Liu, Jiashuai and Mao, Anyu and Niu, Yi and Zhang, Xianli and Gong, Tieliang and Li, Chen and Gao, Zeyu },\n title = { { PAMIL: Prototype Attention-based Multiple Instance Learning for Whole Slide Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Digital pathology images are not only crucial for diagnosing cancer but also play a significant role in treatment planning, and research into disease mechanisms. The multiple instance learning (MIL) technique provides an effective weakly-supervised methodology for analyzing gigapixel Whole Slide Image (WSI). Recent advancements in MIL approaches have predominantly focused on predicting a singular diagnostic label for each WSI, simultaneously enhancing interpretability via attention mechanisms. However, given the heterogeneity of tumors, each WSI may contain multiple histotypes. Also, the generated attention maps often fail to offer a comprehensible explanation of the underlying reasoning process. These constraints limit the potential applicability of MIL-based methods in clinical settings. In this paper, we propose a Prototype Attention-based Multiple Instance Learning (PAMIL) method, designed to improve the model\u2019s reasoning interpretability without compromising its classification performance at the WSI level. PAMIL merges prototype learning with attention mechanisms, enabling the model to quantify the similarity between prototypes and instances, thereby providing the interpretability at instance level. Specifically, two branches are equipped in PAMIL, providing prototype and instance-level attention scores, which are aggregated to derive bag-level predictions. 
Extensive experiments are conducted on four datasets with two diverse WSI classification tasks, demonstrating the effectiveness and interpretability of our PAMIL.", "title":"PAMIL: Prototype Attention-based Multiple Instance Learning for Whole Slide Image Classification", "authors":[ "Liu, Jiashuai", "Mao, Anyu", "Niu, Yi", "Zhang, Xianli", "Gong, Tieliang", "Li, Chen", "Gao, Zeyu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Jiashuai-Liu\/PAMIL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":666 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3403_paper.pdf", "bibtext":"@InProceedings{ He_PitVQA_MICCAI2024,\n author = { He, Runlong and Xu, Mengya and Das, Adrito and Khan, Danyal Z. and Bano, Sophia and Marcus, Hani J. and Stoyanov, Danail and Clarkson, Matthew J. and Islam, Mobarakol },\n title = { { PitVQA: Image-grounded Text Embedding LLM for Visual Question Answering in Pituitary Surgery } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Visual Question Answering (VQA) within the surgical domain, utilizing Large Language Models (LLMs), offers a distinct opportunity to improve intra-operative decision-making and facilitate intuitive surgeon-AI interaction. However, the development of LLMs for surgical VQA is hindered by the scarcity of diverse and extensive datasets with complex reasoning tasks. Moreover, contextual fusion of the image and text modalities remains an open research challenge due to the inherent differences between these two types of information and the complexity involved in aligning them. This paper introduces PitVQA, a novel dataset specifically designed for VQA in endonasal pituitary surgery and PitVQA-Net, an adaptation of the GPT2 with a novel image-grounded text embedding for surgical VQA. PitVQA comprises 25 procedural videos and a rich collection of question-answer pairs spanning crucial surgical aspects such as phase and step recognition, context understanding, tool detection and localization, and tool-tissue interactions. PitVQA-Net consists of a novel image-grounded text embedding that projects image and text features into a shared embedding space and GPT2 Backbone with an excitation block classification head to generate contextually relevant answers within the complex domain of endonasal pituitary surgery. Our image-grounded text embedding leverages joint embedding, cross-attention and contextual representation to understand the contextual relationship between questions and surgical images. 
We demonstrate the effectiveness of PitVQA-Net on both the PitVQA and the publicly available EndoVis18-VQA dataset, achieving improvements in balanced accuracy of 8% and 9% over the most recent baselines, respectively.", "title":"PitVQA: Image-grounded Text Embedding LLM for Visual Question Answering in Pituitary Surgery", "authors":[ "He, Runlong", "Xu, Mengya", "Das, Adrito", "Khan, Danyal Z.", "Bano, Sophia", "Marcus, Hani J.", "Stoyanov, Danail", "Clarkson, Matthew J.", "Islam, Mobarakol" ], "id":"Conference", "arxiv_id":"2405.13949", "GitHub":[ "https:\/\/github.com\/mobarakol\/PitVQA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":667 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1549_paper.pdf", "bibtext":"@InProceedings{ Wei_Representing_MICCAI2024,\n author = { Wei, Ziquan and Dan, Tingting and Ding, Jiaqi and Laurienti, Paul and Wu, Guorong },\n title = { { Representing Functional Connectivity with Structural Detour: A New Perspective to Decipher Structure-Function Coupling Mechanism } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Modern neuroimaging technologies set the stage for studying structural connectivity (SC) and functional connectivity (FC) \\textit{in-vivo}. Due to distinct biological wiring underpinnings in SC and FC, however, it is challenging to understand their coupling mechanism using statistical association approaches. We seek to answer this challenging neuroscience question through the lens of a novel perspective rooted in network topology. Specifically, our assumption is that each FC instance is either locally supported by the direct link of SC or collaboratively sustained by a group of alternative SC pathways which form a topological notion of \\textit{detour}. In this regard, we propose a new connectomic representation, coined detour connectivity (DC), to characterize the complex relationship between SC and FC by presenting direct FC with the weighted connectivity strength along in-directed SC routes. Furthermore, we present SC-FC Detour Network (SFDN), a graph neural network that integrates DC embedding through a self-attention mechanism, to optimize detour to the extent that the coupling of SC and FC is closely aligned with the evolution of cognitive states.\nWe have applied the concept of DC in network community detection while the clinical value of our SFDN is evaluated in cognitive task recognition and early diagnosis of Alzheimer\u2019s disease. 
After benchmarking on three public datasets under various brain parcellations, our detour-based computational approach shows significant improvement over current state-of-the-art counterpart methods.", "title":"Representing Functional Connectivity with Structural Detour: A New Perspective to Decipher Structure-Function Coupling Mechanism", "authors":[ "Wei, Ziquan", "Dan, Tingting", "Ding, Jiaqi", "Laurienti, Paul", "Wu, Guorong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Chrisa142857\/SC-FC-Detour" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":668 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1085_paper.pdf", "bibtext":"@InProceedings{ Zha_Heteroscedastic_MICCAI2024,\n author = { Zhang, Xiaoran and Pak, Daniel H. and Ahn, Shawn S. and Li, Xiaoxiao and You, Chenyu and Staib, Lawrence H. and Sinusas, Albert J. and Wong, Alex and Duncan, James S. },\n title = { { Heteroscedastic Uncertainty Estimation Framework for Unsupervised Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning methods for unsupervised registration often rely on objectives that assume a uniform noise level across the spatial domain (e.g. mean-squared error loss), but noise distributions are often heteroscedastic and input-dependent in real-world medical images. Thus, this assumption often leads to degradation in registration performance, mainly due to the undesired influence of noise-induced outliers. To mitigate this, we propose a framework for heteroscedastic image uncertainty estimation that can adaptively reduce the influence of regions with high uncertainty during unsupervised registration. The framework consists of a collaborative training strategy for the displacement and variance estimators, and a novel image fidelity weighting scheme utilizing signal-to-noise ratios. Our approach prevents the model from being driven away by spurious gradients caused by the simplified homoscedastic assumption, leading to more accurate displacement estimation. To illustrate its versatility and effectiveness, we tested our framework on two representative registration architectures across three medical image datasets. Our method consistently outperforms baselines and produces sensible uncertainty estimates. The code is publicly available at \\url{https:\/\/voldemort108x.github.io\/hetero_uncertainty\/}.", "title":"Heteroscedastic Uncertainty Estimation Framework for Unsupervised Registration", "authors":[ "Zhang, Xiaoran", "Pak, Daniel H.", "Ahn, Shawn S.", "Li, Xiaoxiao", "You, Chenyu", "Staib, Lawrence H.", "Sinusas, Albert J.", "Wong, Alex", "Duncan, James S." 
], "id":"Conference", "arxiv_id":"2312.00836", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":669 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2715_paper.pdf", "bibtext":"@InProceedings{ Sun_ANew_MICCAI2024,\n author = { Sun, Minghao and Zhou, Tian and Jiang, Chenghui and Lv, Xiaodan and Yu, Han },\n title = { { A New Cine-MRI Segmentation Method of Tongue Dorsum for Postoperative Swallowing Function Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Advantages of cine-MRI include high spatial-temporal resolution and free radia-tion, and the technique has become a new method for analyzing and assessing the swallowing function of patients with head and neck tumors. To reduce the labor work of physicians and improve the robustness of labeling the cine-MRI images, we propose a new swallowing analysis method based on a revised cine-MRI segmentation model. This method aims to automate the calculation of tongue dor-sum motion parameters in the oral and pharyngeal phases of swallowing, fol-lowed by a quantitative analysis. Firstly, based on manually annotated swallow-ing structures, we propose a method for calculating tongue dorsum motion pa-rameters, which enables the quantitative analysis of swallowing capability. Sec-ondly, a spatial-temporal hybrid model composed of convolution and temporal transformer is proposed to extract the tongue dorsum mask sequence from a swallowing cycle MRI sequence. Finally, to fully exploit the advantages of cine-MRI, a Multi-head Temporal Self-Attention (MTSA) mechanism is introduced, which establishes connections among frames and enhances the segmentation re-sults of individual frames. A Temporal Relative Positional Encoding (TRPE) is designed to incorporate the temporal information of different swallowing stages into the network, which enhances the network\u2019s understanding of the swallowing process. Experimental results show that the proposed segmentation model achieves a 1.45% improvement in Dice Score compared to the state-of-the-art methods, and the interclass correlation coefficient (ICC) of the displacement data of swallowing feature points obtained respectively from the model mask and physician annotation exceeds 90%. 
Our code is available at: https:\/\/github.com\/MinghaoSam\/SwallowingFunctionAnalysis.", "title":"A New Cine-MRI Segmentation Method of Tongue Dorsum for Postoperative Swallowing Function Analysis", "authors":[ "Sun, Minghao", "Zhou, Tian", "Jiang, Chenghui", "Lv, Xiaodan", "Yu, Han" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MinghaoSam\/SwallowingFunctionAnalysis" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":670 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0817_paper.pdf", "bibtext":"@InProceedings{ Zha_Highresolution_MICCAI2024,\n author = { Zhang, Wei and Hui, Tik Ho and Tse, Pui Ying and Hill, Fraser and Lau, Condon and Li, Xinyue },\n title = { { High-resolution Medical Image Translation via Patch Alignment-Based Bidirectional Contrastive Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Pathology image assessment plays a crucial role in disease diagnosis and treatment. In this study, we propose a Patch alignment-based Paired medical image-to-image Translation (PPT) model that takes the Hematoxylin and Eosin (H&E) stained image as input and generates the corresponding Immunohistochemistry (IHC) stained image in seconds, which can bypass the laborious and time-consuming procedures of IHC staining and facilitate timely and accurate pathology assessment. First, our proposed PPT model introduces FocalNCE loss in patch-wise bidirectional contrastive learning to ensure high consistency between input and output images. Second, we propose a novel patch alignment loss to address the commonly observed misalignment issue in paired medical image datasets. Third, we incorporate content and frequency loss to produce IHC stained images with finer details. Extensive experiments show that our method outperforms state-of-the-art methods, demonstrates clinical utility in pathology expert evaluation using our dataset and achieves competitive performance in two public breast cancer datasets. Lastly, we release our H&E to IHC image Translation (HIT) dataset of canine lymphoma with paired H&E-CD3 and H&E-PAX5 images, which is the first paired pathological image dataset with a high resolution of 2048\u00d72048. 
Our code and dataset are available at https:\/\/github.com\/coffeeNtv\/PPT.", "title":"High-resolution Medical Image Translation via Patch Alignment-Based Bidirectional Contrastive Learning", "authors":[ "Zhang, Wei", "Hui, Tik Ho", "Tse, Pui Ying", "Hill, Fraser", "Lau, Condon", "Li, Xinyue" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/coffeeNtv\/PPT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":671 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3665_paper.pdf", "bibtext":"@InProceedings{ Bha_Analyzing_MICCAI2024,\n author = { Bhattarai, Ashuta and Jin, Jing and Kambhamettu, Chandra },\n title = { { Analyzing Adjacent B-Scans to Localize Sickle Cell Retinopathy In OCTs } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Imaging modalities, such as Optical coherence tomography (OCT), are one of the core components of medical image diagnosis. Deep learning-based object detection and segmentation models have proven efficient and reliable in this field. OCT images have been extensively used in deep learning-based applications, such as retinal layer segmentation and retinal disease detection for conditions such as age-related macular degeneration (AMD) and diabetic macular edema (DME). However, sickle-cell retinopathy (SCR) has yet to receive significant research attention in the deep-learning community, despite its detrimental effects. To address this gap, we present a new detection network called the Cross Scan Attention Transformer (CSAT), which is specifically designed to identify minute irregularities such as SCR in cross-sectional images such as OCTs. Our method employs a contrastive learning framework to pre-train OCT images and a transformer-based detection network that takes advantage of the volumetric nature of OCT scans. Our research demonstrates the effectiveness of the proposed network in detecting SCR from OCT images, with superior results compared to popular object detection networks such as Faster-RCNN and Detection Transformer (DETR). 
Our code can be found in github.com\/VimsLab\/CSAT.", "title":"Analyzing Adjacent B-Scans to Localize Sickle Cell Retinopathy In OCTs", "authors":[ "Bhattarai, Ashuta", "Jin, Jing", "Kambhamettu, Chandra" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/VimsLab\/CSAT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":672 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2298_paper.pdf", "bibtext":"@InProceedings{ Bon_Gaussian_MICCAI2024,\n author = { Bonilla, Sierra and Zhang, Shuai and Psychogyios, Dimitrios and Stoyanov, Danail and Vasconcelos, Francisco and Bano, Sophia },\n title = { { Gaussian Pancakes: Geometrically-Regularized 3D Gaussian Splatting for Realistic Endoscopic Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Within colorectal cancer diagnostics, conventional colonoscopy techniques face critical limitations, including a limited field of view and a lack of depth information, which can impede the detection of pre-cancerous lesions. Current methods struggle to provide comprehensive and accurate 3D reconstructions of the colonic surface, which can help minimize the missing regions and reinspection for pre-cancerous polyps. Addressing this, we introduce \u201cGaussian Pancakes\u201d, a method that leverages 3D Gaussian Splatting (3D GS) combined with a Recurrent Neural Network-based Simultaneous Localization and Mapping (RNNSLAM) system. By introducing geometric and depth regularization into the 3D GS framework, our approach ensures more accurate alignment of Gaussians with the colon surface, resulting in smoother 3D reconstructions with novel viewing of detailed textures and structures. Evaluations across three diverse datasets show that Gaussian Pancakes enhances novel view synthesis quality, surpassing current leading methods with an 18% boost in PSNR and a 16% improvement in SSIM. It also delivers over 100\u00d7 faster rendering and more than 10\u00d7 shorter training times, making it a practical tool for real-time applications. 
Hence, this approach holds promise for clinical translation toward better detection and diagnosis of colorectal cancer.", "title":"Gaussian Pancakes: Geometrically-Regularized 3D Gaussian Splatting for Realistic Endoscopic Reconstruction", "authors":[ "Bonilla, Sierra", "Zhang, Shuai", "Psychogyios, Dimitrios", "Stoyanov, Danail", "Vasconcelos, Francisco", "Bano, Sophia" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/smbonilla\/GaussianPancakes" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":673 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2565_paper.pdf", "bibtext":"@InProceedings{ Sun_LOMIAT_MICCAI2024,\n author = { Sun, Yuchen and Li, Kunwei and Chen, Duanduan and Hu, Yi and Zhang, Shuaitong },\n title = { { LOMIA-T: A Transformer-based LOngitudinal Medical Image Analysis framework for predicting treatment response of esophageal cancer } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning models based on medical images have made significant strides in predicting treatment outcomes. However, previous methods have primarily concentrated on single time-point images, neglecting the temporal dynamics and changes inherent in longitudinal medical images. Thus, we propose a Transformer-based longitudinal image analysis framework (LOMIA-T) to contrast and fuse latent representations from pre- and post-treatment medical images for predicting treatment response. Specifically, we first design a treatment response-based contrastive loss to enhance latent representation by discerning evolutionary processes across various disease stages. Then, we integrate latent representations from pre- and post-treatment CT images using a cross-attention mechanism. Considering the redundancy in the dual-branch output features induced by the cross-attention mechanism, we propose a clinically interpretable feature fusion strategy to predict treatment response. Experimentally, the proposed framework outperforms several state-of-the-art longitudinal image analysis methods on an in-house Esophageal Squamous Cell Carcinoma (ESCC) dataset, encompassing 170 pre- and post-treatment contrast-enhanced CT image pairs from ESCC patients who underwent neoadjuvant chemoradiotherapy. Ablation experiments validate the efficacy of the proposed treatment response-based contrastive loss and feature fusion strategy. 
The codes will be made available at https:\/\/github.com\/syc19074115\/LOMIA-T.", "title":"LOMIA-T: A Transformer-based LOngitudinal Medical Image Analysis framework for predicting treatment response of esophageal cancer", "authors":[ "Sun, Yuchen", "Li, Kunwei", "Chen, Duanduan", "Hu, Yi", "Zhang, Shuaitong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":674 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3105_paper.pdf", "bibtext":"@InProceedings{ Li_FairDiff_MICCAI2024,\n author = { Li, Wenyi and Xu, Haoran and Zhang, Guiyu and Gao, Huan-ang and Gao, Mingju and Wang, Mengyu and Zhao, Hao },\n title = { { FairDiff: Fair Segmentation with Point-Image Diffusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Fairness is an important topic for medical image analysis, driven by the challenge of unbalanced training data among diverse target groups and the societal demand for equitable medical quality. In response to this issue, our research adopts a data-driven strategy\u2014enhancing data balance by integrating synthetic images. However, in terms of generating synthetic images, previous works either lack paired labels or fail to precisely control the boundaries of synthetic images to be aligned with those labels. To address this, we formulate the problem in a joint optimization manner, in which three networks are optimized towards the goal of empirical risk minimization and fairness maximization. On the implementation side, our solution features an innovative Point-Image Diffusion architecture, which leverages 3D point clouds for improved control over mask boundaries through a point-mask-image synthesis pipeline. This method outperforms significantly existing techniques in synthesizing scanning laser ophthalmoscopy (SLO) fundus images. By combining synthetic data with real data during the training phase using a proposed Equal Scale approach, our model achieves superior fairness segmentation performance compared to the state-of-the-art fairness learning models. 
Code is available at https:\/\/github.com\/wenyi-li\/FairDiff.", "title":"FairDiff: Fair Segmentation with Point-Image Diffusion", "authors":[ "Li, Wenyi", "Xu, Haoran", "Zhang, Guiyu", "Gao, Huan-ang", "Gao, Mingju", "Wang, Mengyu", "Zhao, Hao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/wenyi-li\/FairDiff" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":675 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0300_paper.pdf", "bibtext":"@InProceedings{ Lei_Epicardium_MICCAI2024,\n author = { Lei, Long and Zhou, Jun and Pei, Jialun and Zhao, Baoliang and Jin, Yueming and Teoh, Yuen-Chun Jeremy and Qin, Jing and Heng, Pheng-Ann },\n title = { { Epicardium Prompt-guided Real-time Cardiac Ultrasound Frame-to-volume Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Real-time fusion of intraoperative 2D ultrasound images and the preoperative 3D ultrasound volume based on the frame-to-volume registration can provide a comprehensive guidance view for cardiac interventional surgery. However, cardiac ultrasound images are characterized by a low signal-to-noise ratio and small differences between adjacent frames, coupled with significant dimension variations between 2D frames and 3D volumes to be registered, resulting in real-time and accurate cardiac ultrasound frame-to-volume registration being a very challenging task. This paper introduces a lightweight end-to-end Cardiac Ultrasound frame-to-volume Registration network, termed CU-Reg. Specifically, the proposed model leverages epicardium prompt-guided anatomical clues to reinforce the interaction of 2D sparse and 3D dense features, followed by a voxel-wise local-global aggregation of enhanced features, thereby boosting the cross-dimensional matching effectiveness of low-quality ultrasound modalities. We further embed an inter-frame discriminative regularization term within the hybrid supervised learning to increase the distinction between adjacent slices in the same ultrasound volume to ensure registration stability. Experimental results on the reprocessed CAMUS dataset demonstrate that our CU-Reg surpasses existing methods in terms of registration accuracy and efficiency, meeting the guidance requirements of clinical cardiac interventional surgery. 
Our code is available at https:\/\/github.com\/LLEIHIT\/CU-Reg.", "title":"Epicardium Prompt-guided Real-time Cardiac Ultrasound Frame-to-volume Registration", "authors":[ "Lei, Long", "Zhou, Jun", "Pei, Jialun", "Zhao, Baoliang", "Jin, Yueming", "Teoh, Yuen-Chun Jeremy", "Qin, Jing", "Heng, Pheng-Ann" ], "id":"Conference", "arxiv_id":"2406.14534", "GitHub":[ "https:\/\/github.com\/LLEIHIT\/CU-Reg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":676 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2459_paper.pdf", "bibtext":"@InProceedings{ Mej_Enhancing_MICCAI2024,\n author = { Mejia, Gabriel and Ruiz, Daniela and C\u00e1rdenas, Paula and Manrique, Leonardo and Vega, Daniela and Arbel\u00e1ez, Pablo },\n title = { { Enhancing Gene Expression Prediction from Histology Images with Spatial Transcriptomics Completion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Spatial Transcriptomics is a novel technology that aligns histology images with spatially resolved gene expression profiles. Although groundbreaking, it struggles with gene capture yielding high corruption in acquired data. Given potential applications, recent efforts have focused on predicting transcriptomic profiles solely from histology images. However, differences in databases, preprocessing techniques, and training hyperparameters hinder a fair comparison between methods. To address these challenges, we present a systematically curated and processed database collected from 26 public sources, representing an 8.6-fold increase compared to previous works. Additionally, we propose a state-of-the-art transformer-based completion technique for inferring missing\ngene expression, which significantly boosts the performance of transcriptomic profile predictions across all datasets. Altogether, our contributions constitute the most comprehensive benchmark of gene expression prediction from histology images to date and a stepping stone for future research on spatial transcriptomics.", "title":"Enhancing Gene Expression Prediction from Histology Images with Spatial Transcriptomics Completion", "authors":[ "Mejia, Gabriel", "Ruiz, Daniela", "C\u00e1rdenas, Paula", "Manrique, Leonardo", "Vega, Daniela", "Arbel\u00e1ez, Pablo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/BCV-Uniandes\/SpaRED" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":677 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2248_paper.pdf", "bibtext":"@InProceedings{ Bra_BackMix_MICCAI2024,\n author = { Bransby, Kit M. 
and Beqiri, Arian and Cho Kim, Woo-Jin and Oliveira, Jorge and Chartsias, Agisilaos and Gomez, Alberto },\n title = { { BackMix: Mitigating Shortcut Learning in Echocardiography with Minimal Supervision } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Neural networks can learn spurious correlations that lead to the correct prediction in a validation set, but generalise poorly because the predictions are right for the wrong reason. This undesired learning of naive shortcuts (Clever Hans effect) can happen for example in echocardiogram view classification when background cues (e.g. metadata) are biased towards a class and the model learns to focus on those background features instead of on the image content. We propose a simple, yet effective random background augmentation method called BackMix, which samples random backgrounds from other examples in the training set. By enforcing the background to be uncorrelated with the outcome, the model learns to focus on the data within the ultrasound sector and becomes invariant to the regions outside this. We extend our method in a semi-supervised setting, finding that the positive effects of BackMix are maintained with as few as 5% of segmentation labels. A loss weighting mechanism, wBackMix, is also proposed to increase the contribution of the augmented examples. We validate our method on both in-distribution and out-of-distribution datasets, demonstrating significant improvements in classification accuracy, region focus and generalisability. Our source code is available at: https:\/\/github.com\/kitbransby\/BackMix", "title":"BackMix: Mitigating Shortcut Learning in Echocardiography with Minimal Supervision", "authors":[ "Bransby, Kit M.", "Beqiri, Arian", "Cho Kim, Woo-Jin", "Oliveira, Jorge", "Chartsias, Agisilaos", "Gomez, Alberto" ], "id":"Conference", "arxiv_id":"2406.19148", "GitHub":[ "https:\/\/github.com\/kitbransby\/BackMix" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":678 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3335_paper.pdf", "bibtext":"@InProceedings{ Hu_Boosting_MICCAI2024,\n author = { Hu, Yihuang and Peng, Qiong and Du, Zhicheng and Zhang, Guojun and Wu, Huisi and Liu, Jingxin and Chen, Hao and Wang, Liansheng },\n title = { { Boosting FFPE-to-HE Virtual Staining with Cell Semantics from Pretrained Segmentation Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Histopathological samples are typically processed by formalin fixation and paraffin embedding (FFPE) for long-term preservation. To visualize the blurry structures of cells and tissue in FFPE slides, hematoxylin and eosin (HE) staining is commonly utilized, a process that involves sophisticated laboratory facilities and complicated procedures. Recently, virtual staining realized by generative models has been widely utilized. The blurry cell structure in FFPE slides poses challenges to well-studied FFPE-to-HE virtual staining. 
However, most existing studies overlook this issue. In this paper, we propose a framework for boosting FFPE-to-HE virtual staining with cell semantics from pretrained cell segmentation models (PCSM), as the well-trained PCSM has learned an effective representation for cell structure, which contains richer cell semantics than that from a generative model. Thus, we learn from PCSM by utilizing the high-level and low-level semantics of real and virtual images. Specifically, we propose to utilize PCSM to extract multiple-scale latent representations from real and virtual images and align them. Moreover, we introduce the low-level cell location guidance for generative models, informed by PCSM. We conduct extensive experiments on our collected dataset. The results demonstrate a significant improvement of our method over the existing network qualitatively and quantitatively. Code is available at https:\/\/github.com\/huyihuang\/FFPE-to-HE.", "title":"Boosting FFPE-to-HE Virtual Staining with Cell Semantics from Pretrained Segmentation Model", "authors":[ "Hu, Yihuang", "Peng, Qiong", "Du, Zhicheng", "Zhang, Guojun", "Wu, Huisi", "Liu, Jingxin", "Chen, Hao", "Wang, Liansheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":679 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3885_paper.pdf", "bibtext":"@InProceedings{ Cha_Baikal_MICCAI2024,\n author = { Chaudhary, Shivesh and Sankarapandian, Sivaramakrishnan and Sooknah, Matt and Pai, Joy and McCue, Caroline and Chen, Zhenghao and Xu, Jun },\n title = { { Baikal: Unpaired Denoising of Fluorescence Microscopy Images using Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Fluorescence microscopy is an indispensable tool for biological discovery but image quality is constrained by desired spatial and temporal resolution, sample sensitivity, and other factors. Computational denoising methods can bypass imaging constraints and improve signal-to-noise ratio in images. However, current state-of-the-art methods are commonly trained in a supervised manner, requiring paired noisy and clean images, limiting their application across diverse datasets. An alternative class of denoising models can be trained in a self-supervised manner, assuming independent noise across samples, but are unable to generalize from available unpaired clean images. A method that can be trained without paired data and can use information from available unpaired high-quality images would address both weaknesses. Here, we present Baikal, a first attempt to formulate such a framework using Denoising Diffusion Probabilistic Models (DDPM) for fluorescence microscopy images. We first train a DDPM backbone in an unconditional manner to learn generative priors over complex morphologies in microscopy images; we can then apply various conditioning strategies to sample from the trained model and propose an optimal strategy to denoise the desired image. Extensive quantitative comparisons demonstrate better performance of Baikal over state-of-the-art self-supervised methods across multiple datasets. 
We highlight the advantage of generative priors learnt by DDPMs in denoising complex Flywing morphologies where other methods fail. Overall, our DDPM-based denoising framework presents a new class of denoising methods for fluorescence microscopy datasets that achieves good performance without the collection of paired high-quality images.", "title":"Baikal: Unpaired Denoising of Fluorescence Microscopy Images using Diffusion Models", "authors":[ "Chaudhary, Shivesh", "Sankarapandian, Sivaramakrishnan", "Sooknah, Matt", "Pai, Joy", "McCue, Caroline", "Chen, Zhenghao", "Xu, Jun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/scelesticsiva\/denoising" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":680 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0519_paper.pdf", "bibtext":"@InProceedings{ Xin_OntheFly_MICCAI2024,\n author = { Xin, Yuelin and Chen, Yicheng and Ji, Shengxiang and Han, Kun and Xie, Xiaohui },\n title = { { On-the-Fly Guidance Training for Medical Image Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"This study introduces a novel On-the-Fly Guidance (OFG) training framework for enhancing existing learning-based image registration models, addressing the limitations of weakly-supervised and unsupervised methods. Weakly-supervised methods struggle due to the scarcity of labeled data, and unsupervised methods directly depend on image similarity metrics for accuracy. Our method proposes a supervised fashion for training registration models, without the need for any labeled data. OFG generates pseudo-ground truth during training by refining deformation predictions with a differentiable optimizer, enabling direct supervised learning. OFG optimizes deformation predictions efficiently, improving the performance of registration models without sacrificing inference speed. Our method is tested across several benchmark datasets and leading models; it significantly enhanced performance, providing a plug-and-play solution for training learning-based registration models. 
Code available at: https:\/\/github.com\/cilix-ai\/on-the-fly-guidance", "title":"On-the-Fly Guidance Training for Medical Image Registration", "authors":[ "Xin, Yuelin", "Chen, Yicheng", "Ji, Shengxiang", "Han, Kun", "Xie, Xiaohui" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/cilix-ai\/on-the-fly-guidance" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":681 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0334_paper.pdf", "bibtext":"@InProceedings{ Qu_Multimodal_MICCAI2024,\n author = { Qu, Linhao and Huang, Dan and Zhang, Shaoting and Wang, Xiaosong },\n title = { { Multi-modal Data Binding for Survival Analysis Modeling with Incomplete Data and Annotations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Survival analysis stands as a pivotal process in cancer treatment research, crucial for predicting patient survival rates accurately. Recent advancements in data collection techniques have paved the way for enhancing survival predictions by integrating information from multiple modalities. However, real-world scenarios often present challenges with incomplete data, particularly when dealing with censored survival labels. Prior works have addressed missing modalities but have overlooked incomplete labels, which can introduce bias and limit model efficacy. To bridge this gap, we introduce a novel framework that simultaneously handles incomplete data across modalities and censored survival labels. Our approach employs advanced foundation models to encode individual modalities and align them into a universal representation space for seamless fusion. By generating pseudo labels and incorporating uncertainty, we significantly enhance predictive accuracy. The proposed method demonstrates outstanding prediction accuracy in two survival analysis tasks on both employed datasets. This innovative approach overcomes limitations associated with disparate modalities and improves the feasibility of comprehensive survival analysis using multiple large foundation models.", "title":"Multi-modal Data Binding for Survival Analysis Modeling with Incomplete Data and Annotations", "authors":[ "Qu, Linhao", "Huang, Dan", "Zhang, Shaoting", "Wang, Xiaosong" ], "id":"Conference", "arxiv_id":"2407.17726", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":682 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4008_paper.pdf", "bibtext":"@InProceedings{ Bun_Learning_MICCAI2024,\n author = { Bunnell, Arianna and Glaser, Yannik and Valdez, Dustin and Wolfgruber, Thomas and Altamirano, Aleen and Zamora Gonz\u00e1lez, Carol and Hernandez, Brenda Y. and Sadowski, Peter and Shepherd, John A. 
},\n title = { { Learning a Clinically-Relevant Concept Bottleneck for Lesion Detection in Breast Ultrasound } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Detecting and classifying lesions in breast ultrasound images is a promising application of artificial intelligence (AI) for reducing the burden of cancer in regions with limited access to mammography. Such AI systems are more likely to be useful in a clinical setting if their predictions can be explained. This work proposes an explainable AI model that provides interpretable predictions using a standard lexicon from the American College of Radiology\u2019s Breast Imaging Reporting and Data System (BI-RADS). The model is a deep neural network which predicts BI-RADS features in a concept bottleneck layer for cancer classification. This architecture enables radiologists to interpret the predictions of the AI system from the concepts and potentially fix errors in real time by modifying the concept predictions. In experiments, a model is developed on 8,854 images from 994 women with expert annotations and histological cancer labels. The model outperforms state-of-the-art lesion detection frameworks with 48.9 average precision on the held-out testing set. For cancer classification, concept intervention increases performance from 0.876 to 0.885 area under the receiver operating characteristic curve. Training and evaluation code is available at https:\/\/github.com\/hawaii-ai\/bus-cbm.", "title":"Learning a Clinically-Relevant Concept Bottleneck for Lesion Detection in Breast Ultrasound", "authors":[ "Bunnell, Arianna", "Glaser, Yannik", "Valdez, Dustin", "Wolfgruber, Thomas", "Altamirano, Aleen", "Zamora Gonz\u00e1lez, Carol", "Hernandez, Brenda Y.", "Sadowski, Peter", "Shepherd, John A." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/hawaii-ai\/bus-cbm" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":683 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3893_paper.pdf", "bibtext":"@InProceedings{ Yi_Hallucinated_MICCAI2024,\n author = { Yi, Jingjun and Bi, Qi and Zheng, Hao and Zhan, Haolan and Ji, Wei and Huang, Yawen and Li, Shaoxin and Li, Yuexiang and Zheng, Yefeng and Huang, Feiyue },\n title = { { Hallucinated Style Distillation for Single Domain Generalization in Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Single domain generalization (single-DG) for medical image segmentation aims to learn a style-invariant representation, which can be generalized to a variety of unseen target domains, with the data from a single source. 
However, due to the limitation of sample diversity in the single source domain, the robustness of generalized features yielded by existing single-DG methods is still unsatisfactory.\nIn this paper, we propose a novel single-DG framework, namely Hallucinated Style Distillation (HSD), to generate the robust style-invariant feature representation.\nParticularly, our HSD firstly expands the style diversity of the single source domain via hallucinating the samples with random styles.\nThen, a hallucinated cross-domain distillation paradigm is proposed to distill the style-invariant knowledge between the original and style-hallucinated medical images.\nSince the hallucinated styles close to the source domain may over-fit our distillation paradigm, we further propose a learning objective to diversify style-invariant representation, which alleviates the over-fitting issue and smooths the learning process of generalized features.\nExtensive experiments on two standard domain generalized medical image segmentation datasets show the state-of-the-art performance of our HSD.\nSource code will be publicly available.", "title":"Hallucinated Style Distillation for Single Domain Generalization in Medical Image Segmentation", "authors":[ "Yi, Jingjun", "Bi, Qi", "Zheng, Hao", "Zhan, Haolan", "Ji, Wei", "Huang, Yawen", "Li, Shaoxin", "Li, Yuexiang", "Zheng, Yefeng", "Huang, Feiyue" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":684 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0616_paper.pdf", "bibtext":"@InProceedings{ Cai_Rethinking_MICCAI2024,\n author = { Cai, Yu and Chen, Hao and Cheng, Kwang-Ting },\n title = { { Rethinking Autoencoders for Medical Anomaly Detection from A Theoretical Perspective } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical anomaly detection aims to identify abnormal findings using only normal training data, playing a crucial role in health screening and recognizing rare diseases. Reconstruction-based methods, particularly those utilizing autoencoders (AEs), are dominant in this field. They work under the assumption that AEs trained on only normal data cannot reconstruct unseen abnormal regions well, thereby enabling the anomaly detection based on reconstruction errors. However, this assumption does not always hold due to the mismatch between the reconstruction training objective and the anomaly detection task objective, rendering these methods theoretically unsound. This study focuses on providing a theoretical foundation for AE-based reconstruction methods in anomaly detection. By leveraging information theory, we elucidate the principles of these methods and reveal that the key to improving AE in anomaly detection lies in minimizing the information entropy of latent vectors. Experiments on four datasets with two image modalities validate the effectiveness of our theory. To the best of our knowledge, this is the first effort to theoretically clarify the principles and design philosophy of AE for anomaly detection.
The code is available at \\url{https:\/\/github.com\/caiyu6666\/AE4AD}.", "title":"Rethinking Autoencoders for Medical Anomaly Detection from A Theoretical Perspective", "authors":[ "Cai, Yu", "Chen, Hao", "Cheng, Kwang-Ting" ], "id":"Conference", "arxiv_id":"2403.09303", "GitHub":[ "https:\/\/github.com\/caiyu6666\/AE4AD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":685 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2095_paper.pdf", "bibtext":"@InProceedings{ Dan_CINA_MICCAI2024,\n author = { Dannecker, Maik and Kyriakopoulou, Vanessa and Cordero-Grande, Lucilio and Price, Anthony N. and Hajnal, Joseph V. and Rueckert, Daniel },\n title = { { CINA: Conditional Implicit Neural Atlas for Spatio-Temporal Representation of Fetal Brains } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"We introduce a conditional implicit neural atlas (CINA) for spatio-temporal atlas generation from Magnetic Resonance Images (MRI) of the neurotypical and pathological fetal brain, that is fully independent of affine or non-rigid registration. During training, CINA learns a general representation of the fetal brain and encodes subject specific information into latent code. After training, CINA can construct a faithful atlas with tissue probability maps of the fetal brain for any gestational age (GA) and anatomical variation covered within the training domain. Thus, CINA is competent to represent both, neurotypical and pathological brains. Furthermore, a trained CINA model can be fit to brain MRI of unseen subjects via test-time optimization of the latent code. CINA can then produce probabilistic tissue maps tailored to a particular subject. We evaluate our method on a total of 198 T2 weighted MRI of normal and abnormal fetal brains from the dHCP and FeTA datasets. We demonstrate CINA\u2019s capability to represent a fetal brain atlas that can be flexibly conditioned on GA and on anatomical variations like ventricular volume or degree of cortical folding, making it a suitable tool for modeling both neurotypical and pathological brains. We quantify the fidelity of our atlas by means of tissue segmentation and age prediction and compare it to an established baseline. CINA demonstrates superior accuracy for neurotypical brains and pathological brains with ventriculomegaly. 
Moreover, CINA scores a mean absolute error of 0.23 weeks in fetal brain age prediction, further confirming an accurate representation of fetal brain development.", "title":"CINA: Conditional Implicit Neural Atlas for Spatio-Temporal Representation of Fetal Brains", "authors":[ "Dannecker, Maik", "Kyriakopoulou, Vanessa", "Cordero-Grande, Lucilio", "Price, Anthony N.", "Hajnal, Joseph V.", "Rueckert, Daniel" ], "id":"Conference", "arxiv_id":"2403.08550", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":686 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3075_paper.pdf", "bibtext":"@InProceedings{ Bar_Average_MICCAI2024,\n author = { Barfoot, Theodore and Garcia Peraza Herrera, Luis C. and Glocker, Ben and Vercauteren, Tom },\n title = { { Average Calibration Error: A Differentiable Loss for Improved Reliability in Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep neural networks for medical image segmentation often produce overconfident results misaligned with empirical observations. Such miscalibration challenges their clinical translation. We propose to use marginal L1 average calibration error (mL1-ACE) as a novel auxiliary loss function to improve pixel-wise calibration without compromising segmentation quality. We show that this loss, despite using hard binning, is directly differentiable, bypassing the need for approximate but differentiable surrogate or soft binning approaches. Our work also introduces the concept of dataset reliability histograms which generalises standard reliability diagrams for refined visual assessment of calibration in semantic segmentation aggregated at the dataset level. Using mL1-ACE, we reduce average and maximum calibration error by 45% and 55% respectively, maintaining a Dice score of 87% on the BraTS 2021 dataset.
We share our code here: https:\/\/github.com\/cai4cai\/ACE-DLIRIS.", "title":"Average Calibration Error: A Differentiable Loss for Improved Reliability in Image Segmentation", "authors":[ "Barfoot, Theodore", "Garcia Peraza Herrera, Luis C.", "Glocker, Ben", "Vercauteren, Tom" ], "id":"Conference", "arxiv_id":"2403.06759", "GitHub":[ "https:\/\/github.com\/cai4cai\/ACE-DLIRIS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":687 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0306_paper.pdf", "bibtext":"@InProceedings{ Kim_Quantitative_MICCAI2024,\n author = { Kim, Young-Min and Kim, Myeong-Gee and Oh, Seok-Hwan and Jung, Guil and Lee, Hyeon-Jik and Kim, Sang-Yun and Kwon, Hyuk-Sool and Choi, Sang-Il and Bae, Hyeon-Min },\n title = { { Quantitative Assessment of Thyroid Nodules through Ultrasound Imaging Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent studies have proposed quantitative ultrasound (QUS) to extract the acoustic properties of tissues from pulse-echo data obtained through multiple transmissions. In this paper, we introduce a learning-based approach to identify thyroid nodule malignancy by extracting acoustic attenuation and speed of sound from ultrasound imaging. The proposed method employs a neural model that integrates a convolutional neural network (CNN) for detailed local pulse-echo pattern analysis with a Transformer architecture, enhancing the model\u2019s ability to capture complex correlations among multiple beam receptions. B-mode images are employed as both an input and label to guarantee robust performance regardless of the complex structures present in the human neck, such as the thyroid, blood vessels, and trachea. In order to train the proposed deep neural model, a simulation phantom mimicking the structure of human muscle, fat layers, and the shape of the thyroid gland has been designed. 
The effectiveness of the proposed method is evaluated through numerical simulations and clinical tests.", "title":"Quantitative Assessment of Thyroid Nodules through Ultrasound Imaging Analysis", "authors":[ "Kim, Young-Min", "Kim, Myeong-Gee", "Oh, Seok-Hwan", "Jung, Guil", "Lee, Hyeon-Jik", "Kim, Sang-Yun", "Kwon, Hyuk-Sool", "Choi, Sang-Il", "Bae, Hyeon-Min" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":688 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0233_paper.pdf", "bibtext":"@InProceedings{ Elb_An_MICCAI2024,\n author = { Elbatel, Marawan and Kamnitsas, Konstantinos and Li, Xiaomeng },\n title = { { An Organism Starts with a Single Pix-Cell: A Neural Cellular Diffusion for High-Resolution Image Synthesis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Generative modeling seeks to approximate the statistical properties of real data, enabling synthesis of new data that closely resembles the original distribution. Generative Adversarial Networks (GANs) and Denoising Diffusion Probabilistic Models (DDPMs) represent significant advancements in generative modeling, drawing inspiration from game theory and thermodynamics, respectively. Nevertheless, the exploration of generative modeling through the lens of biological evolution remains largely untapped. In this paper, we introduce a novel family of models termed Generative Cellular Automata (GeCA), inspired by the evolution of an organism from a single cell. GeCAs are evaluated as an effective augmentation tool for retinal disease classification across two imaging modalities: Fundus and Optical Coherence Tomography (OCT). In the context of OCT imaging, where data is scarce and the distribution of classes is inherently skewed, GeCA significantly boosts the performance of 11 different ophthalmological conditions, achieving a 12% increase in the average F1 score compared to conventional baselines. GeCAs outperform both diffusion methods that incorporate UNet or state-of-the art variants with transformer-based denoising models, under similar parameter constraints. Code is available at: https:\/\/github.com\/xmed-lab\/GeCA.", "title":"An Organism Starts with a Single Pix-Cell: A Neural Cellular Diffusion for High-Resolution Image Synthesis", "authors":[ "Elbatel, Marawan", "Kamnitsas, Konstantinos", "Li, Xiaomeng" ], "id":"Conference", "arxiv_id":"2407.03018", "GitHub":[ "https:\/\/github.com\/xmed-lab\/GeCA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":689 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1433_paper.pdf", "bibtext":"@InProceedings{ Sie_PULPo_MICCAI2024,\n author = { Siegert, Leonard and Fischer, Paul and Heinrich, Mattias P. and Baumgartner, Christian F. 
},\n title = { { PULPo: Probabilistic Unsupervised Laplacian Pyramid Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deformable image registration is fundamental to many medical imaging applications. Registration is an inherently ambiguous task often admitting many viable solutions. While neural network-based registration techniques enable fast and accurate registration, the majority of existing approaches are not able to estimate uncertainty. Here, we present PULPo, a method for probabilistic deformable registration capable of uncertainty quantification. PULPo probabilistically models the distribution of deformation fields on different hierarchical levels combining them using Laplacian pyramids. This allows our method to model global as well as local aspects of the deformation field. We evaluate our method on two widely used neuroimaging datasets and find that it achieves high registration performance as well as substantially better calibrated uncertainty quantification compared to the current state-of-the-art.", "title":"PULPo: Probabilistic Unsupervised Laplacian Pyramid Registration", "authors":[ "Siegert, Leonard", "Fischer, Paul", "Heinrich, Mattias P.", "Baumgartner, Christian F." ], "id":"Conference", "arxiv_id":"2407.10567", "GitHub":[ "https:\/\/github.com\/leonardsiegert\/PULPo" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":690 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1176_paper.pdf", "bibtext":"@InProceedings{ Sun_FedMLP_MICCAI2024,\n author = { Sun, Zhaobin and Wu, Nannan and Shi, Junjie and Yu, Li and Cheng, Kwang-Ting and Yan, Zengqiang },\n title = { { FedMLP: Federated Multi-Label Medical Image Classification under Task Heterogeneity } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cross-silo federated learning (FL) enables decentralized organizations to collaboratively train models while preserving data privacy and has made significant progress in medical image classification. One common assumption is task homogeneity where each client has access to all classes during training. However, in clinical practice, given a multi-label classification task, constrained by the level of medical knowledge and the prevalence of diseases, each institution may diagnose only partial categories, resulting in task heterogeneity. How to pursue effective multi-label medical image classification under task heterogeneity is under-explored. In this paper, we first formulate such a realistic label missing setting in the multi-label FL domain and propose a two-stage method FedMLP to combat class missing from two aspects: pseudo label tagging and global knowledge learning. The former utilizes a warmed-up model to generate class prototypes and select samples with high confidence to supplement missing labels, while the latter uses a global model as a teacher for consistency regularization to prevent forgetting missing class knowledge. 
Experiments on two publicly-available medical datasets validate the superiority of FedMLP against state-of-the-art federated semi-supervised and noisy label learning approaches under task heterogeneity. Code is available at https:\/\/github.com\/szbonaldo\/FedMLP.", "title":"FedMLP: Federated Multi-Label Medical Image Classification under Task Heterogeneity", "authors":[ "Sun, Zhaobin", "Wu, Nannan", "Shi, Junjie", "Yu, Li", "Cheng, Kwang-Ting", "Yan, Zengqiang" ], "id":"Conference", "arxiv_id":"2406.18995", "GitHub":[ "https:\/\/github.com\/szbonaldo\/FedMLP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":691 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3070_paper.pdf", "bibtext":"@InProceedings{ Che_Ultrasound_MICCAI2024,\n author = { Chen, Tingxiu and Shi, Yilei and Zheng, Zixuan and Yan, Bingcong and Hu, Jingliang and Zhu, Xiao Xiang and Mou, Lichao },\n title = { { Ultrasound Image-to-Video Synthesis via Latent Dynamic Diffusion Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Ultrasound video classification enables automated diagnosis and has emerged as an important research area. However, publicly available ultrasound video datasets remain scarce, hindering progress in developing effective video classification models. We propose addressing this shortage by synthesizing plausible ultrasound videos from readily available, abundant ultrasound images. To this end, we introduce a latent dynamic diffusion model (LDDM) to efficiently translate static images to dynamic sequences with realistic video characteristics. We demonstrate strong quantitative results and visually appealing synthesized videos on the BUSV benchmark. Notably, training video classification models on combinations of real and LDDM-synthesized videos substantially improves performance over using real data alone, indicating our method successfully emulates dynamics critical for discrimination. Our image-to-video approach provides an effective data augmentation solution to advance ultrasound video analysis.
Code is available at https:\/\/github.com\/MedAITech\/U_I2V.", "title":"Ultrasound Image-to-Video Synthesis via Latent Dynamic Diffusion Models", "authors":[ "Chen, Tingxiu", "Shi, Yilei", "Zheng, Zixuan", "Yan, Bingcong", "Hu, Jingliang", "Zhu, Xiao Xiang", "Mou, Lichao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MedAITech\/U_I2V" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":692 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1676_paper.pdf", "bibtext":"@InProceedings{ Liu_Sparsity_MICCAI2024,\n author = { Liu, Mingyuan and Xu, Lu and Liu, Shengnan and Zhang, Jicong },\n title = { { Sparsity- and Hybridity-Inspired Visual Parameter-Efficient Fine-Tuning for Medical Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"The success of Large Vision Models (LVMs) is accompanied by vast data volumes, which are prohibitively expensive in medical diagnosis. To address this, recent efforts exploit Parameter-Efficient Fine-Tuning (PEFT), which trains a small number of weights while freezing the rest for knowledge transfer. However, they typically assign trainable weights to the same positions in LVMs in a heuristic manner, regardless of task differences, making them suboptimal for professional applications like medical diagnosis. To address this, we statistically reveal the nature of sparsity and hybridity during diagnostic-targeted fine-tuning, i.e., a small portion of key weights significantly impacts performance, and these key weights are hybrid, including both task-specific and task-agnostic parts. Based on this, we propose a novel Sparsity- and Hybridity-inspired Parameter Efficient Fine-Tuning (SH-PEFT). It selects and trains a small portion of weights based on their importance, which is innovatively estimated by hybridizing both task-specific and task-agnostic strategies. Validated on six medical datasets of different modalities, we demonstrate that SH-PEFT achieves state-of-the-art performance in transferring LVMs to medical diagnosis in terms of accuracy. By tuning around 0.01% of the weights, it outperforms full model fine-tuning. Moreover, SH-PEFT also performs comparably to other models deliberately optimized for specific medical tasks.
Extensive experiments demonstrate the effectiveness of each design and reveal the great potential of pre-trained LVM transfer for medical diagnosis.", "title":"Sparsity- and Hybridity-Inspired Visual Parameter-Efficient Fine-Tuning for Medical Diagnosis", "authors":[ "Liu, Mingyuan", "Xu, Lu", "Liu, Shengnan", "Zhang, Jicong" ], "id":"Conference", "arxiv_id":"2405.17877", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":693 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3648_paper.pdf", "bibtext":"@InProceedings{ Cui_7T_MICCAI2024,\n author = { Cui, Qiming and Tosun, Duygu and Mukherjee, Pratik and Abbasi-Asl, Reza },\n title = { { 7T MRI Synthesization from 3T Acquisitions } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Supervised deep learning techniques can be used to generate synthetic 7T MRIs from 3T MRI inputs. This image enhancement process leverages the advantages of ultra-high-field MRI to improve the signal-to-noise and contrast-to-noise ratios of 3T acquisitions. In this paper, we introduce multiple novel 7T synthesization algorithms based on custom-designed variants of the V-Net convolutional neural network. We demonstrate that the V-Net based model has superior performance in enhancing both single-site and multi-site MRI datasets compared to the existing benchmark model. When trained on 3T-7T MRI pairs from 8 subjects with mild Traumatic Brain Injury (TBI), our model achieves state-of-the-art 7T synthesization performance. Compared to previous works, synthetic 7T images generated from our pipeline also display superior enhancement of pathological tissue. Additionally, we implement and test a data augmentation scheme for training models that are robust to variations in the input distribution. This allows synthetic 7T models to accommodate intra-scanner and inter-scanner variability in multisite datasets. On a harmonized dataset consisting of 18 3T-7T MRI pairs from two institutions, including both healthy subjects and those with mild TBI, our model maintains its performance and can generalize to 3T MRI inputs with lower resolution. 
Our findings demonstrate the promise of V-Net based models for MRI enhancement and offer a preliminary probe into improving the generalizability of synthetic 7T models with data augmentation.", "title":"7T MRI Synthesization from 3T Acquisitions", "authors":[ "Cui, Qiming", "Tosun, Duygu", "Mukherjee, Pratik", "Abbasi-Asl, Reza" ], "id":"Conference", "arxiv_id":"2403.08979", "GitHub":[ "https:\/\/github.com\/abbasilab\/Synthetic_7T_MRI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":694 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2862_paper.pdf", "bibtext":"@InProceedings{ Zha_Towards_MICCAI2024,\n author = { Zhao, Yisheng and Zhu, Huaiyu and Shu, Qi and Huan, Ruohong and Chen, Shuohui and Pan, Yun },\n title = { { Towards a Deeper insight into Face Detection in Neonatal wards } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Neonatal face detection is the prerequisite for face-based intelligent medical applications. Nevertheless, it has been found that this area has received minimal attention in existing research. The paucity of open-source, large-scale datasets significantly constrains current studies, which are further compounded by issues such as large-scale occlusions, class imbalance, and precise localization requirements. This work aims to address these challenges from both data and methodological perspectives. We constructed the first open-source face detection dataset for neonates, involving images from 1,000 neonates in the neonatal wards. Utilizing this dataset and adopting NICUface-RF as the baseline, we introduce two novel modules. The hierarchical contextual classification aims to improve the positive\/negative anchor ratios and alleviate large-scale occlusions. Concurrently, the DIoU-aware NMS is designed to preserve bounding boxes of superior localization quality by employing predicted DIoUs as the ranking criterion in NMS procedures. Experimental results illustrate the superiority of our method. 
The dataset and code are available at https:\/\/github.com\/neonatal-pain.", "title":"Towards a Deeper insight into Face Detection in Neonatal wards", "authors":[ "Zhao, Yisheng", "Zhu, Huaiyu", "Shu, Qi", "Huan, Ruohong", "Chen, Shuohui", "Pan, Yun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":695 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1467_paper.pdf", "bibtext":"@InProceedings{ Li_DiffusionEnhanced_MICCAI2024,\n author = { Li, Xiang and Fang, Huihui and Liu, Mingsi and Xu, Yanwu and Duan, Lixin },\n title = { { Diffusion-Enhanced Transformation Consistency Learning for Retinal Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Retinal image segmentation plays a critical role in rapid disease detection and early detection, such as assisting in the observation of abnormal structures and structural quantification. However, acquiring semantic segmentation labels is both expensive and time-consuming. To improve label utilization efficiency in semantic segmentation models, we propose Diffusion-Enhanced Transformation Consistency Learning (termed as DiffTCL), a semi-supervised segmentation approach. Initially, the model undergoes self-supervised diffusion pre-training, establishing a reasonable initial model to improve the accuracy of early pseudo-labels in the subsequent consistency training, thereby preventing error accumulation. Furthermore, we developed a Transformation Consistency Learning (TCL) method for retinal images, effectively utilizing unlabeled data. In TCL, the prediction of image affine transformations acts as supervision for both image elastic transformations and pixel-level transformations. We carry out evaluations on the REFUGE2 and MS datasets, involving the segmentation of two modalities: optic disc\/cup segmentation in color fundus photography, and layer segmentation in optical coherence tomography. The results for both tasks demonstrate that DiffTCL achieves relative improvements of 5.0% and 2.3%, respectively, over other state-of-the-art semi-supervised methods. The code is available at: https:\/\/github.com\/lixiang007666\/DiffTCL.", "title":"Diffusion-Enhanced Transformation Consistency Learning for Retinal Image Segmentation", "authors":[ "Li, Xiang", "Fang, Huihui", "Liu, Mingsi", "Xu, Yanwu", "Duan, Lixin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/lixiang007666\/DiffTCL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":696 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3555_paper.pdf", "bibtext":"@InProceedings{ Men_PoseGuideNet_MICCAI2024,\n author = { Men, Qianhui and Guo, Xiaoqing and Papageorghiou, Aris T. and Noble, J.
Alison },\n title = { { Pose-GuideNet: Automatic Scanning Guidance for Fetal Head Ultrasound from Pose Estimation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"3D pose estimation from a 2D cross-sectional view enables healthcare professionals to navigate through the 3D space, and such techniques initiate automatic guidance in many image-guided radiology applications. In this work, we investigate how estimating 3D fetal pose from freehand 2D ultrasound scanning can guide a sonographer to locate a head standard plane. Fetal head pose is estimated by the proposed Pose-GuideNet, a novel 2D\/3D registration approach to align freehand 2D ultrasound to a 3D anatomical atlas without the acquisition of 3D ultrasound. To facilitate the 2D to 3D cross-dimensional projection, we exploit the prior knowledge in the atlas to align the standard plane frame in a freehand scan. A semantic-aware contrastive-based approach is further proposed to align the frames that are off standard planes based on their anatomical similarity. In the experiment, we enhance the existing assessment of freehand image localization by comparing the transformation of its estimated pose towards standard plane with the corresponding probe motion, which reflects the actual view change in 3D anatomy. Extensive results on two clinical head biometry tasks show that Pose-GuideNet not only accurately predicts pose but also successfully predicts the direction of the fetal head. Evaluations with probe motions further demonstrate the feasibility of adopting Pose-GuideNet for freehand ultrasound-assisted navigation in a sensor-free environment.", "title":"Pose-GuideNet: Automatic Scanning Guidance for Fetal Head Ultrasound from Pose Estimation", "authors":[ "Men, Qianhui", "Guo, Xiaoqing", "Papageorghiou, Aris T.", "Noble, J. Alison" ], "id":"Conference", "arxiv_id":"2408.09931", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":697 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2315_paper.pdf", "bibtext":"@InProceedings{ Jia_Explanationdriven_MICCAI2024,\n author = { Jiang, Ning and Huang, Zhengyong and Sui, Yao },\n title = { { Explanation-driven Cyclic Learning for High-Quality Brain MRI Reconstruction from Unknown Degradation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Spatial resolution, signal-to-noise ratio (SNR), and motion artifacts critically matter in any Magnetic Resonance Imaging (MRI) practices. Unfortunately, it is difficult to achieve a trade-off between these factors. Scans with an increased spatial resolution require prolonged scan times and suffer from drastically reduced SNR. Increased scan time necessarily increases the potential of subject motion. 
Recently, end-to-end deep learning techniques have emerged as a post-acquisition method to deal with the above issues by reconstructing high-quality MRI images from various sources of degradation such as motion, noise, and low resolution. However, those methods focus on a single known source of degradation, while multiple unknown sources of degradation commonly happen in a single scan. We aimed to develop a new methodology that enables high-quality MRI reconstruction from scans corrupted by a mixture of multiple unknown sources of degradation. We proposed a unified reconstruction framework based on explanation-driven cyclic learning. We designed an interpretation strategy for the neural networks, the Cross-Attention-Gradient (CAG), which generates pixel-level explanations from degraded images to enhance reconstruction with degradation-specific knowledge. We developed a cyclic learning scheme that comprises a front-end classification task and a back-end image reconstruction task, circularly shares knowledge between different tasks and benefits from multi-task learning. We assessed our method on three public datasets, including the real and clean MRI scans from 140 subjects with simulated degradation, and the real and motion-degraded MRI scans from 10 subjects. We identified 5 sources of degradation for the simulated data. Experimental results demonstrated that our approach achieved superior reconstructions in motion correction, SNR improvement, and resolution enhancement, as compared to state-of-the-art methods.", "title":"Explanation-driven Cyclic Learning for High-Quality Brain MRI Reconstruction from Unknown Degradation", "authors":[ "Jiang, Ning", "Huang, Zhengyong", "Sui, Yao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":698 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3796_paper.pdf", "bibtext":"@InProceedings{ Liu_kt_MICCAI2024,\n author = { Liu, Ye and Cui, Zhuo-Xu and Sun, Kaicong and Zhao, Ting and Cheng, Jing and Zhu, Yuliang and Shen, Dinggang and Liang, Dong },\n title = { { k-t Self-Consistency Diffusion: A Physics-Informed Model for Dynamic MR Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion models exhibit promising prospects in magnetic resonance (MR) image reconstruction due to their robust image generation and generalization capabilities. However, current diffusion models are predominantly customized for 2D image reconstruction tasks. When addressing dynamic MR imaging (dMRI), the challenge lies in accurately generating 2D images while simultaneously adhering to the temporal direction and matching the motion patterns of the scanned regions. In dynamic parallel imaging, motion patterns can be characterized through the self-consistency of k-t data. Motivated by this observation, we propose to design a diffusion model that aligns with k-t self-consistency. Specifically, following a discrete iterative algorithm to optimize k-t self-consistency, we extend it to a continuous formulation, thereby designing a stochastic diffusion equation in line with k-t self-consistency. 
Finally, by incorporating the score-matching method to estimate prior terms, we construct a diffusion model for dMRI. Experimental results on a cardiac dMRI dataset showcase the superiority of our method over current state-of-the-art techniques. Our approach exhibits remarkable reconstruction potential even at extremely high acceleration factors, reaching up to 24X, and demonstrates robust generalization for dynamic data with temporally shuffled frames.", "title":"k-t Self-Consistency Diffusion: A Physics-Informed Model for Dynamic MR Imaging", "authors":[ "Liu, Ye", "Cui, Zhuo-Xu", "Sun, Kaicong", "Zhao, Ting", "Cheng, Jing", "Zhu, Yuliang", "Shen, Dinggang", "Liang, Dong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":699 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2260_paper.pdf", "bibtext":"@InProceedings{ Sch_LargeScale_MICCAI2024,\n author = { Schnabel, Till N. and Lill, Yoriko and Benitez, Benito K. and Nalabothu, Prasad and Metzler, Philipp and Mueller, Andreas A. and Gross, Markus and G\u00f6zc\u00fc, Baran and Solenthaler, Barbara },\n title = { { Large-Scale 3D Infant Face Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Learned 3-dimensional face models have emerged as valuable tools for statistically modeling facial variations, facilitating a wide range of applications in computer graphics, computer vision, and medicine. While these models have been extensively developed for adult faces, research on infant face models remains sparse, limited to a few models trained on small datasets, none of which are publicly available. We propose a novel approach to address this gap by developing a large-scale 3D INfant FACE model (INFACE) using a diverse set of face scans. By harnessing uncontrolled and incomplete data, INFACE surpasses previous efforts in both scale and accessibility. Notably, it represents the first publicly available shape model of its kind, facilitating broader adoption and further advancements in the field. We showcase the versatility of our learned infant face model through multiple potential clinical applications, including shape and appearance completion for mesh cleaning and treatment planning, as well as 3D face reconstruction from images captured in uncontrolled environments. 
By disentangling expression and identity, we further enable the neutralization of facial features \u2014 a crucial capability given the unpredictable nature of infant scanning.", "title":"Large-Scale 3D Infant Face Model", "authors":[ "Schnabel, Till N.", "Lill, Yoriko", "Benitez, Benito K.", "Nalabothu, Prasad", "Metzler, Philipp", "Mueller, Andreas A.", "Gross, Markus", "G\u00f6zc\u00fc, Baran", "Solenthaler, Barbara" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":700 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1496_paper.pdf", "bibtext":"@InProceedings{ Gao_DeSAM_MICCAI2024,\n author = { Gao, Yifan and Xia, Wei and Hu, Dingdu and Wang, Wenkui and Gao, Xin },\n title = { { DeSAM: Decoupled Segment Anything Model for Generalizable Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning-based medical image segmentation models often suffer from domain shift, where the models trained on a source domain do not generalize well to other unseen domains. As a prompt-driven foundation model with powerful generalization capabilities, the Segment Anything Model (SAM) shows potential for improving the cross-domain robustness of medical image segmentation. However, SAM performs significantly worse in automatic segmentation scenarios than when manually prompted, hindering its direct application to domain generalization. Upon further investigation, we discovered that the degradation in performance was related to the coupling effect of inevitable poor prompts and mask generation. To address the coupling effect, we propose the Decoupled SAM (DeSAM). DeSAM modifies SAM\u2019s mask decoder by introducing two new modules: a prompt-relevant IoU module (PRIM) and a prompt-decoupled mask module (PDMM). PRIM predicts the IoU score and generates mask embeddings, while PDMM extracts multi-scale features from the intermediate layers of the image encoder and fuses them with the mask embeddings from PRIM to generate the final segmentation mask. This decoupled design allows DeSAM to leverage the pre-trained weights while minimizing the performance degradation caused by poor prompts. We conducted experiments on publicly available cross-site prostate and cross-modality abdominal image segmentation datasets. The results show that our DeSAM leads to a substantial performance improvement over previous state-of-theart domain generalization methods. 
The code is publicly available at https:\/\/github.com\/yifangao112\/DeSAM.", "title":"DeSAM: Decoupled Segment Anything Model for Generalizable Medical Image Segmentation", "authors":[ "Gao, Yifan", "Xia, Wei", "Hu, Dingdu", "Wang, Wenkui", "Gao, Xin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/yifangao112\/DeSAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":701 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1955_paper.pdf", "bibtext":"@InProceedings{ Imr_BrainShift_MICCAI2024,\n author = { Imre, Baris and Thibeau-Sutre, Elina and Reimer, Jorieke and Kho, Kuan and Wolterink, Jelmer M. },\n title = { { Brain-Shift: Unsupervised Pseudo-Healthy Brain Synthesis for Novel Biomarker Extraction in Chronic Subdural Hematoma } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Chronic subdural hematoma (cSDH) is a common neurological condition characterized by the accumulation of blood between the brain and the dura mater. This accumulation of blood can exert pressure on the brain, potentially leading to fatal outcomes. Treatment options for cSDH are limited to invasive surgery or non-invasive management. Traditionally, the midline shift, hand-measured by experts from an ideal sagittal plane, and the hematoma volume have been the primary metrics for quantifying and analyzing cSDH. However, these approaches do not quantify the local 3D brain deformation caused by cSDH. We propose a novel method using anatomy-aware unsupervised diffeomorphic pseudo-healthy synthesis to generate brain deformation fields. The deformation fields derived from this process are utilized to extract biomarkers that quantify the shift in the brain due to cSDH. We use CT scans of 121 patients for training and validation of our method and find that our metrics allow the identification of patients who require surgery. Our results indicate that automatically obtained brain deformation fields might contain prognostic value for personalized cSDH treatment.", "title":"Brain-Shift: Unsupervised Pseudo-Healthy Brain Synthesis for Novel Biomarker Extraction in Chronic Subdural Hematoma", "authors":[ "Imre, Baris", "Thibeau-Sutre, Elina", "Reimer, Jorieke", "Kho, Kuan", "Wolterink, Jelmer M." 
], "id":"Conference", "arxiv_id":"2403.19415", "GitHub":[ "https:\/\/github.com\/MIAGroupUT\/Brain-Shift" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":702 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2571_paper.pdf", "bibtext":"@InProceedings{ Xu_Novelty_MICCAI2024,\n author = { Xu, Rui and Yu, Dan and Yang, Xuan and Ye, Xinchen and Wang, Zhihui and Wang, Yi and Wang, Hongkai and Li, Haojie and Huang, Dingpin and Xu, Fangyi and Gan, Yi and Tu, Yuan and Hu, Hongjie },\n title = { { Novelty Detection Based Discriminative Multiple Instance Feature Mining to Classify NSCLC PD-L1 Status on HE-Stained Histopathological Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"It is crucial to analyze HE-stained histopathological whole slide images (WSIs) to classify PD-L1 status for non-small cell lung cancer (NSCLC) patients, due to the expensive immunohistochemical examination performed in practical clinics. Usually, a multiple instance learning (MIL) framework is applied to resolve the classification problems of WSIs. However, the existing MIL methods cannot perform well on the PD-L1 status classification, due to unlearnable instance features and challenging instances containing weak visual differences. To address this problem, we propose a novelty detection based discriminative multiple instance feature mining method. It contains a trainable instance feature encoder, learning effective information from the on-hand dataset to reduce the domain difference problem, and a novelty detection based instance feature mining mechanism, selecting typical instances to train the encoder for mining more discriminative instance features. We evaluate the proposed method on a private NSCLC PD-L1 dataset and the widely used public Camelyon16 dataset that is targeted for breast cancer identification. 
Experimental results show that the proposed method is not only effective in predicting NSCLC PD-L1 status but also generalizes well on the public dataset.", "title":"Novelty Detection Based Discriminative Multiple Instance Feature Mining to Classify NSCLC PD-L1 Status on HE-Stained Histopathological Images", "authors":[ "Xu, Rui", "Yu, Dan", "Yang, Xuan", "Ye, Xinchen", "Wang, Zhihui", "Wang, Yi", "Wang, Hongkai", "Li, Haojie", "Huang, Dingpin", "Xu, Fangyi", "Gan, Yi", "Tu, Yuan", "Hu, Hongjie" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":703 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1037_paper.pdf", "bibtext":"@InProceedings{ Cha_QueryNet_MICCAI2024,\n author = { Chai, Jiaxing and Luo, Zhiming and Gao, Jianzhe and Dai, Licun and Lai, Yingxin and Li, Shaozi },\n title = { { QueryNet: A Unified Framework for Accurate Polyp Segmentation and Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recently, deep learning-based methods have demonstrated effectiveness in the diagnosis of polyps, which holds clinical significance in the prevention of colorectal cancer. These methods can be broadly categorized into two tasks: Polyp Segmentation (PS) and Polyp Detection (PD). The advantage of PS lies in precise localization, but it is constrained by the contrast of the polyp area. On the other hand, PD provides the advantages of global perspective but is susceptible to issues such as false positives or missed detections. Despite substantial progress in both tasks, there has been limited exploration of integrating these two tasks. To address this problem, we introduce QueryNet, a unified framework for accurate polyp segmentation and detection. Specifically, our QueryNet is constructed on top of Mask2Former, a query-based segmentation model. It conceptualizes object queries as cluster centers and constructs a detection branch to handle both tasks. Extensive quantitative and qualitative experiments on five public benchmarks verify that this unified framework effectively mitigates the task-specific limitations, thereby enhancing the overall performance. Furthermore, QueryNet achieves comparable performance against state-of-the-art PS and PD methods.", "title":"QueryNet: A Unified Framework for Accurate Polyp Segmentation and Detection", "authors":[ "Chai, Jiaxing", "Luo, Zhiming", "Gao, Jianzhe", "Dai, Licun", "Lai, Yingxin", "Li, Shaozi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/JiaxingChai\/Query_Net" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":704 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2427_paper.pdf", "bibtext":"@InProceedings{ Oh_Are_MICCAI2024,\n author = { Oh, Ji-Hun and Falahkheirkhah, Kianoush and Bhargava, Rohit },\n title = { { Are We Ready for Out-of-Distribution Detection in Digital Pathology?
} },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"The detection of semantic and covariate out-of-distribution (OOD) examples is a critical yet overlooked challenge in digital pathology (DP). Recently, substantial insight and methods on OOD detection were presented by the ML community, but how do they fare in DP applications? To this end, we establish a benchmark study, our highlights being: 1) the adoption of proper evaluation protocols, 2) the comparison of diverse detectors in both a single and multi-model setting, and 3) the exploration into advanced ML settings like transfer learning (ImageNet vs. DP pre-training) and choice of architecture (CNNs vs. transformers). Through our comprehensive experiments, we contribute new insights and guidelines, paving the way for future research and discussion.", "title":"Are We Ready for Out-of-Distribution Detection in Digital Pathology?", "authors":[ "Oh, Ji-Hun", "Falahkheirkhah, Kianoush", "Bhargava, Rohit" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":705 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0912_paper.pdf", "bibtext":"@InProceedings{ Zho_Reprogramming_MICCAI2024,\n author = { Zhou, Yuhang and Du, Siyuan and Li, Haolin and Yao, Jiangchao and Zhang, Ya and Wang, Yanfeng },\n title = { { Reprogramming Distillation for Medical Foundation Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical foundation models pre-trained on large-scale datasets have demonstrated powerful versatile capabilities for various tasks. However, due to the gap between pre-training tasks (or modalities) and downstream tasks (or modalities) and the real-world computation and speed constraints, it might not be straightforward to apply medical foundation models in the downstream scenarios. Previous methods, such as parameter efficient fine-tuning (PEFT) methods and knowledge distillation (KD) methods, are unable to simultaneously address the task (or modality) inconsistency and achieve personalized lightweight deployment under diverse real-world demands. To address the above issues, we propose a novel framework called Reprogramming Distillation (RD). On one hand, RD reprograms the original feature space of the foundation model so that it is more relevant to downstream scenarios, aligning tasks and modalities. On the other hand, through a co-training mechanism and a shared classifier, connections are established between the reprogrammed knowledge and the knowledge of student models, ensuring that the reprogrammed feature space can be smoothly mimicked by student models of different structures. Further, to reduce the randomness under different training conditions, we design a Centered Kernel Alignment (CKA) distillation to promote robust knowledge transfer.
Empirically, we show that on extensive datasets, RD consistently achieves superior performance compared with previous PEFT and KD methods. Source code is available at: https:\/\/github.com\/MediaBrain-SJTU\/RD", "title":"Reprogramming Distillation for Medical Foundation Models", "authors":[ "Zhou, Yuhang", "Du, Siyuan", "Li, Haolin", "Yao, Jiangchao", "Zhang, Ya", "Wang, Yanfeng" ], "id":"Conference", "arxiv_id":"2407.06504", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":706 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1068_paper.pdf", "bibtext":"@InProceedings{ Yan_Spatiotemporal_MICCAI2024,\n author = { Yan, Ruodan and Sch\u00f6nlieb, Carola-Bibiane and Li, Chao },\n title = { { Spatiotemporal Graph Neural Network Modelling Perfusion MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Perfusion MRI (pMRI) offers valuable insights into tumor vascularity and promises to predict tumor genotypes, thus benefiting prognosis for glioma patients, yet effective models tailored to 4D pMRI are still lacking. This study presents the first attempt to model 4D pMRI using a GNN-based spatiotemporal model (PerfGAT), integrating spatial information and temporal kinetics to predict Isocitrate DeHydrogenase (IDH) mutation status in glioma patients. Specifically, we propose a graph structure learning approach based on edge attention and negative graphs to optimize temporal correlations modeling. Moreover, we design a dual-attention feature fusion module to integrate spatiotemporal features while addressing tumor-related brain regions. Further, we develop a class-balanced augmentation method tailored to spatiotemporal data, which could mitigate the common label imbalance issue in clinical datasets. Our experimental results demonstrate that the proposed method outperforms other state-of-the-art approaches, promising to model pMRI effectively for patient characterization.", "title":"Spatiotemporal Graph Neural Network Modelling Perfusion MRI", "authors":[ "Yan, Ruodan", "Sch\u00f6nlieb, Carola-Bibiane", "Li, Chao" ], "id":"Conference", "arxiv_id":"2406.06434", "GitHub":[ "https:\/\/github.com\/DaisyYan2000\/PerfGAT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":707 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2847_paper.pdf", "bibtext":"@InProceedings{ Ise_nnUNet_MICCAI2024,\n author = { Isensee, Fabian and Wald, Tassilo and Ulrich, Constantin and Baumgartner, Michael and Roy, Saikat and Maier-Hein, Klaus and J\u00e4ger, Paul F.
},\n title = { { nnU-Net Revisited: A Call for Rigorous Validation in 3D Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"The release of nnU-Net marked a paradigm shift in 3D medical image segmentation, demonstrating that a properly configured U-Net architecture could still achieve state-of-the-art results. Despite this, the pursuit of novel architectures, and the respective claims of superior performance over the U-Net baseline, continued. In this study, we demonstrate that many of these recent claims fail to hold up when scrutinized for common validation shortcomings, such as the use of inadequate baselines, insufficient datasets, and neglected computational resources. By meticulously avoiding these pitfalls, we conduct a thorough and comprehensive benchmarking of current segmentation methods including CNN-based, Transformer-based, and Mamba-based approaches. In contrast to current beliefs, we find that the recipe for state-of-the-art performance is 1) employing CNN-based U-Net models, including ResNet and ConvNeXt variants, 2) using the nnU-Net framework, and 3) scaling models to modern hardware resources. These results indicate an ongoing innovation bias towards novel architectures in the field and underscore the need for more stringent validation standards in the quest for scientific progress.", "title":"nnU-Net Revisited: A Call for Rigorous Validation in 3D Medical Image Segmentation", "authors":[ "Isensee, Fabian", "Wald, Tassilo", "Ulrich, Constantin", "Baumgartner, Michael", "Roy, Saikat", "Maier-Hein, Klaus", "J\u00e4ger, Paul F." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MIC-DKFZ\/nnUNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":708 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3756_paper.pdf", "bibtext":"@InProceedings{ Pla_ARegionBased_MICCAI2024,\n author = { Playout, Cle\u0301ment and Legault, Zacharie and Duval, Renaud and Boucher, Marie Carole and Cheriet, Farida },\n title = { { A Region-Based Approach to Diabetic Retinopathy Classification with Superpixel Tokenization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"We explore the efficacy of a region-based method for image tokenization, aimed at enhancing the resolution of images fed to a Transformer. This method involves segmenting the image into regions using SLIC superpixels. Spatial features, derived from a pretrained model are aggregated segment-wise and input into a streamlined Vision Transformer (ViT). Our model introduces two novel contributions: the matching of segments to semantic prototypes and the graph-based clustering of tokens to merge similar adjacent segments. 
This approach leads to a model that not only competes effectively in classifying diabetic retinopathy but also produces high-resolution attribution maps, thereby enhancing the interpretability of its predictions.", "title":"A Region-Based Approach to Diabetic Retinopathy Classification with Superpixel Tokenization", "authors":[ "Playout, Cle\u0301ment", "Legault, Zacharie", "Duval, Renaud", "Boucher, Marie Carole", "Cheriet, Farida" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ClementPla\/RetinalViT\/tree\/prototype_superpixels" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":709 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2494_paper.pdf", "bibtext":"@InProceedings{ Jia_MGDR_MICCAI2024,\n author = { Jiang, Bo and Li, Yapeng and Wan, Xixi and Chen, Yuan and Tu, Zhengzheng and Zhao, Yumiao and Tang, Jin },\n title = { { MGDR: Multi-Modal Graph Disentangled Representation for Brain Disease Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the task of disease prediction, medical data with different modalities can provide much complementary information for disease diagnosis. However, existing multi-modal learning methods often tend to focus on learning shared representation across modalities for disease diagnosis, without fully exploiting the complementary information from multiple modalities. To overcome this limitation, in this paper, we propose a novel Multi-modal Graph Disentangled Representation (MGDR) approach for brain disease prediction problem. Specifically, we first construct a specific modality graph for each modality data and employ Graph Convolutional Network (GCN) to learn node representations. Then, we learn the common information across different modalities and private information of each modality by developing a disentangled representation of modalities model. Moreover, to remove the possible noise from the private information, we employ a contrastive learning module to learn more compact representation of private information for each modality. Also, a new Multi-modal Perception Attention (MPA) module is employed to integrate feature representations of multiple private information. Finally, we integrate both common and private information together for disease prediction. 
Experiments on both ABIDE and TADPOLE datasets demonstrate that our MGDR method achieves the best performance when compared with some recent advanced methods.", "title":"MGDR: Multi-Modal Graph Disentangled Representation for Brain Disease Prediction", "authors":[ "Jiang, Bo", "Li, Yapeng", "Wan, Xixi", "Chen, Yuan", "Tu, Zhengzheng", "Zhao, Yumiao", "Tang, Jin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":710 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1757_paper.pdf", "bibtext":"@InProceedings{ Wan_Weakly_MICCAI2024,\n author = { Wang, Haoyu and Li, Kehan and Zhu, Jihua and Wang, Fan and Lian, Chunfeng and Ma, Jianhua },\n title = { { Weakly Supervised Tooth Instance Segmentation on 3D Dental Models with Multi-Label Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automatic tooth segmentation on 3D dental models is a fundamental task for computer-aided orthodontic treatment. Many deep learning methods aimed at precise tooth segmentation currently require meticulous point-wise annotations, which are extremely time-consuming and labor-intensive. To address this issue, we proposed a weakly supervised tooth instance segmentation network (WS-TIS), which only requires coarse class labels along with approximately 50% of point-wise tooth annotations. Our WS-TIS consists of two stages, including tooth discriminative localization and tooth instance segmentation. Precise tooth localization is frequently pivotal in instance segmentation. However, annotation of tooth centroids or bounding boxes is often challenging when we have limited point-wise tooth annotations. Therefore, we designed a proxy task to weakly supervise tooth localization. Specifically, we utilize a fine-grained multi-label classification task, equipping with the disentangled re-sampling strategy and a gated attention mechanism which can assist the network in learning discriminative tooth features. With discriminative features, certain feature visualization techniques can be easily employed to locate these discriminative regions, thereby accurately cropping out the teeth. In the second stage, a segmentation module was trained on limited annotated data (approximately 50% of all teeth) to accurately segment each tooth from cropping regions. 
Experiments on Teeth3DS demonstrate that our method with weakly supervised learning and weak annotations, achieves superior performance comparable to state-of-the-art approaches with full annotations.", "title":"Weakly Supervised Tooth Instance Segmentation on 3D Dental Models with Multi-Label Learning", "authors":[ "Wang, Haoyu", "Li, Kehan", "Zhu, Jihua", "Wang, Fan", "Lian, Chunfeng", "Ma, Jianhua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ladderlab-xjtu\/WS-TIS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":711 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2008_paper.pdf", "bibtext":"@InProceedings{ Fis_Progressive_MICCAI2024,\n author = { Fischer, Stefan M. and Felsner, Lina and Osuala, Richard and Kiechle, Johannes and Lang, Daniel M. and Peeken, Jan C. and Schnabel, Julia A. },\n title = { { Progressive Growing of Patch Size: Resource-Efficient Curriculum Learning for Dense Prediction Tasks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this work, we introduce Progressive Growing of Patch Size, a resource-efficient implicit curriculum learning approach for dense prediction tasks. Our curriculum approach is defined by growing the patch size during model training, which gradually increases the task\u2019s difficulty. We integrated our curriculum into the nnU-Net framework and evaluated the methodology on all 10 tasks of the Medical Segmentation Decathlon. With our approach, we are able to substantially reduce runtime, computational costs, and CO$_{2}$ emissions of network training compared to classical constant patch size training. In our experiments, the curriculum approach resulted in improved convergence. We are able to outperform standard nnU-Net training, which is trained with constant patch size, in terms of Dice Score on 7 out of 10 MSD tasks while only spending roughly 50\\% of the original training runtime. To the best of our knowledge, our Progressive Growing of Patch Size is the first successful employment of a sample-length curriculum in the form of patch size in the field of computer vision. Our code is publicly available at \\url{https:\/\/github.com}.", "title":"Progressive Growing of Patch Size: Resource-Efficient Curriculum Learning for Dense Prediction Tasks", "authors":[ "Fischer, Stefan M.", "Felsner, Lina", "Osuala, Richard", "Kiechle, Johannes", "Lang, Daniel M.", "Peeken, Jan C.", "Schnabel, Julia A." 
], "id":"Conference", "arxiv_id":"2407.07853", "GitHub":[ "https:\/\/github.com\/compai-lab\/2024-miccai-fischer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":712 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2185_paper.pdf", "bibtext":"@InProceedings{ Ham_CT2Rep_MICCAI2024,\n author = { Hamamci, Ibrahim Ethem and Er, Sezgin and Menze, Bjoern },\n title = { { CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical imaging plays a crucial role in diagnosis, with radiology reports serving as vital documentation. Automating report generation has emerged as a critical need to alleviate the workload of radiologists. While machine learning has facilitated report generation for 2D medical imaging, extending this to 3D has been unexplored due to computational complexity and data scarcity. We introduce the first method to generate radiology reports for 3D medical imaging, specifically targeting chest CT volumes. Given the absence of comparable methods, we establish a baseline using an advanced 3D vision encoder in medical imaging to demonstrate our method\u2019s effectiveness, which leverages a novel auto-regressive causal transformer. Furthermore, recognizing the benefits of leveraging information from previous visits, we augment CT2Rep with a cross-attention-based multi-modal fusion module and hierarchical memory, enabling the incorporation of longitudinal multimodal data.", "title":"CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging", "authors":[ "Hamamci, Ibrahim Ethem", "Er, Sezgin", "Menze, Bjoern" ], "id":"Conference", "arxiv_id":"2403.06801", "GitHub":[ "https:\/\/github.com\/ibrahimethemhamamci\/CT2Rep" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":713 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3391_paper.pdf", "bibtext":"@InProceedings{ Liu_DiffRect_MICCAI2024,\n author = { Liu, Xinyu and Li, Wuyang and Yuan, Yixuan },\n title = { { DiffRect: Latent Diffusion Label Rectification for Semi-supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised medical image segmentation aims to leverage limited annotated data and rich unlabeled data to perform accurate segmentation. However, existing semi-supervised methods are highly dependent on the quality of self-generated pseudo labels, which are prone to incorrect supervision and confirmation bias. Meanwhile, they are insufficient in capturing the label distributions in latent space and suffer from limited generalization to unlabeled data. 
To address these issues, we propose a Latent Diffusion Label Rectification Model (DiffRect) for semi-supervised medical image segmentation. DiffRect first utilizes a Label Context Calibration Module (LCC) to calibrate the biased relationship between classes by learning the category-wise correlation in pseudo labels, then applies a Latent Feature Rectification Module (LFR) on the latent space to formulate and align the pseudo label distributions of different levels via latent diffusion. It utilizes a denoising network to learn the coarse to fine and fine to precise consecutive distribution transportations. We evaluate DiffRect on three public datasets: ACDC, MS-CMRSEG 2019, and Decathlon Prostate. Experimental results demonstrate the effectiveness of DiffRect, e.g., it achieves 82.40\\% Dice score on ACDC with only 1\\% labeled scan available, outperforms the previous\nstate-of-the-art by 4.60\\% in Dice, and even rivals fully supervised performance. Code will be made publicly available.", "title":"DiffRect: Latent Diffusion Label Rectification for Semi-supervised Medical Image Segmentation", "authors":[ "Liu, Xinyu", "Li, Wuyang", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"2407.09918", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/DiffRect" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":714 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1898_paper.pdf", "bibtext":"@InProceedings{ The_TractOracle_MICCAI2024,\n author = { The\u0301berge, Antoine and Descoteaux, Maxime and Jodoin, Pierre-Marc },\n title = { { TractOracle: towards an anatomically-informed reward function for RL-based tractography } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Reinforcement learning (RL)-based tractography is a competitive alternative to machine learning and classical tractography algorithms due to its high anatomical accuracy obtained without the need for any annotated data. However, the reward functions so far used to train RL agents do not encapsulate anatomical knowledge, which causes agents to generate spurious false positive tracts. In this paper, we propose a new RL tractography system, TractOracle, which relies on a reward network trained for streamline classification. This network is used both as a reward function during training and as a means of stopping the tracking process early, thus reducing the number of false positive streamlines. This makes our system a unique method that evaluates and reconstructs WM streamlines at the same time.
We report ratios of true and false positives improved by almost 20\\% on one dataset and a 2x improvement of the amount of true-positives on another dataset, by far the best results ever reported in tractography.", "title":"TractOracle: towards an anatomically-informed reward function for RL-based tractography", "authors":[ "The\u0301berge, Antoine", "Descoteaux, Maxime", "Jodoin, Pierre-Marc" ], "id":"Conference", "arxiv_id":"2403.17845", "GitHub":[ "https:\/\/github.com\/scil-vital\/TrackToLearn" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":715 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3369_paper.pdf", "bibtext":"@InProceedings{ Kar_Longitudinal_MICCAI2024,\n author = { Karaman, Batuhan K. and Dodelzon, Katerina and Akar, Gozde B. and Sabuncu, Mert R. },\n title = { { Longitudinal Mammogram Risk Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Breast cancer is one of the leading causes of mortality among women worldwide. Early detection and risk assessment play a crucial role in improving survival rates. Therefore, annual or biennial mammograms are often recommended for screening in high-risk groups. Mammograms are typically interpreted by expert radiologists based on the Breast Imaging Reporting and Data System (BI-RADS), which provides a uniform way to describe findings and categorizes them to indicate the level of concern for breast cancer. Recently, machine learning (ML) and computational approaches have been developed to automate and improve the interpretation of mammograms. However, both BI-RADS and the ML-based methods focus on the analysis of data from the present and sometimes the most recent prior visit. While it has been shown that temporal changes in image features of longitudinal scans are valuable for quantifying breast cancer risk, no prior work has systematically studied this. In this paper, we extend a state-of-the-art ML model to ingest an arbitrary number of longitudinal mammograms and predict future breast cancer risk. On a large scale dataset, we demonstrate that our model, LoMaR, achieves state-of-the-art performance when presented with only the present mammogram. Furthermore, we use LoMaR to characterize the predictive value of prior visits. Our results show that longer histories (e.g., up to four prior annual mammograms) can significantly boost the accuracy of predicting future breast cancer risk, particularly beyond the short-term. Our code and model weights are available at https:\/\/github.com\/batuhankmkaraman\/LoMaR.", "title":"Longitudinal Mammogram Risk Prediction", "authors":[ "Karaman, Batuhan K.", "Dodelzon, Katerina", "Akar, Gozde B.", "Sabuncu, Mert R." 
], "id":"Conference", "arxiv_id":"2404.19083", "GitHub":[ "https:\/\/github.com\/batuhankmkaraman\/LoMaR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":716 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2216_paper.pdf", "bibtext":"@InProceedings{ Gaz_AcneAI_MICCAI2024,\n author = { Gazeau, L\u00e9a and Nguyen, Hang and Nguyen, Zung and Lebedeva, Mariia and Nguyen, Thanh and To, Tat-Dat and Le Digabel, Jimmy and Filiol, J\u00e9rome and Josse, Gwendal and Perlis, Clifford and Wolfe, Jonathan },\n title = { { AcneAI: A new acne severity assessment method using digital images and deep learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"In this paper we present a new AcneAI system that automatically analyses facial acne images in a precise way, detecting and scoring every single acne lesion within an image. Its workflow consists of three main steps: 1) segmentation of all acne and acne-like lesions, 2) scoring of each acne lesion, 3) combining individual acne lesion scores into an overall acne severity score for the whole image, that ranges from 0 to 100. Our clinical tests on the Acne04 dataset shows that AcneAI has an Intraclass Correlation Coefficient (ICC) score of 0.8 in severity classification. We obtained an area under the curve (AUC) of 0.88 in detecting inflammatory lesions in a clinical dataset obtained from a multi-centric clinical trial.", "title":"AcneAI: A new acne severity assessment method using digital images and deep learning", "authors":[ "Gazeau, L\u00e9a", "Nguyen, Hang", "Nguyen, Zung", "Lebedeva, Mariia", "Nguyen, Thanh", "To, Tat-Dat", "Le Digabel, Jimmy", "Filiol, J\u00e9rome", "Josse, Gwendal", "Perlis, Clifford", "Wolfe, Jonathan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/AIpourlapeau\/acne04v2" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":717 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2283_paper.pdf", "bibtext":"@InProceedings{ Wan_Enhancing_MICCAI2024,\n author = { Wang, Diwei and Yuan, Kun and Muller, Candice and Blanc, Fre\u0301de\u0301ric and Padoy, Nicolas and Seo, Hyewon },\n title = { { Enhancing Gait Video Analysis in Neurodegenerative Diseases by Knowledge Augmentation in Vision Language Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present a knowledge augmentation strategy for assessing the diagnostic groups and gait impairment from monocular gait videos. 
Based on a large-scale pre-trained Vision Language Model (VLM), our model learns and improves visual, textual, and numerical representations of patient gait videos, through a collective learning across three distinct modalities: gait videos, class-specific descriptions, and numerical gait parameters. Our specific contributions are two-fold: First, we adopt a knowledge-aware prompt tuning strategy to utilize the class-specific medical description in guiding the text prompt learning. Second, we integrate the paired gait parameters in the form of numerical texts to enhance the numeracy of the textual representation. Results demonstrate that our model not only significantly outperforms state-of-the-art methods in video-based classification tasks but also adeptly decodes the learned class-specific text features into natural language descriptions using the vocabulary of quantitative gait parameters. The code and the model will be made available at our project page: https:\/\/lisqzqng.github.io\/GaitAnalysisVLM\/.", "title":"Enhancing Gait Video Analysis in Neurodegenerative Diseases by Knowledge Augmentation in Vision Language Model", "authors":[ "Wang, Diwei", "Yuan, Kun", "Muller, Candice", "Blanc, Fre\u0301de\u0301ric", "Padoy, Nicolas", "Seo, Hyewon" ], "id":"Conference", "arxiv_id":"2403.13756", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":718 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2611_paper.pdf", "bibtext":"@InProceedings{ Yan_Finegrained_MICCAI2024,\n author = { Yan, Zhongnuo and Yang, Xin and Luo, Mingyuan and Chen, Jiongquan and Chen, Rusi and Liu, Lian and Ni, Dong },\n title = { { Fine-grained Context and Multi-modal Alignment for Freehand 3D Ultrasound Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Fine-grained spatio-temporal learning is crucial for freehand 3D ultrasound reconstruction. Previous works mainly resorted to the coarse-grained spatial features and the separated temporal dependency learning and struggles for fine-grained spatio-temporal learning. Mining spatio-temporal information in fine-grained scales is extremely challenging due to learning difficulties in long-range dependencies. In this context, we propose a novel method to exploit the long-range dependency management capabilities of the state space model (SSM) to address the above challenge. Our contribution is three-fold. First, we propose ReMamba, which mines multi-scale spatio-temporal information by devising a multi-directional SSM. Second, we propose an adaptive fusion strategy that introduces multiple inertial measurement units as auxiliary temporal information to enhance spatio-temporal perception. Last, we design an online alignment strategy that encodes the temporal information as pseudo labels for multi-modal alignment to further improve reconstruction performance. 
Extensive experimental validations on two large-scale datasets show remarkable improvement from our method over competitors.", "title":"Fine-grained Context and Multi-modal Alignment for Freehand 3D Ultrasound Reconstruction", "authors":[ "Yan, Zhongnuo", "Yang, Xin", "Luo, Mingyuan", "Chen, Jiongquan", "Chen, Rusi", "Liu, Lian", "Ni, Dong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":719 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0974_paper.pdf", "bibtext":"@InProceedings{ Kon_GazeDETR_MICCAI2024,\n author = { Kong, Yan and Wang, Sheng and Cai, Jiangdong and Zhao, Zihao and Shen, Zhenrong and Li, Yonghao and Fei, Manman and Wang, Qian },\n title = { { Gaze-DETR: Using Expert Gaze to Reduce False Positives in Vulvovaginal Candidiasis Screening } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate detection of vulvovaginal candidiasis is critical for women\u2019s health, yet its sparse distribution and visually ambiguous characteristics pose significant challenges for accurate identification by pathologists and neural networks alike. Our eye-tracking data reveals that areas garnering sustained attention - yet not marked by experts after deliberation - are often aligned with false positives of neural networks. Leveraging this finding, we introduce Gaze-DETR, a pioneering method that integrates gaze data to enhance neural network precision by diminishing false positives. Gaze-DETR incorporates a universal gaze-guided warm-up protocol applicable across various detection methods and a gaze-guided rectification strategy specifically designed for DETR-based models. Our comprehensive tests confirm that Gaze-DETR surpasses existing leading methods, showcasing remarkable improvements in detection accuracy and generalizability. 
Our code is available at https:\/\/github.com\/YanKong0408\/Gaze-DETR.", "title":"Gaze-DETR: Using Expert Gaze to Reduce False Positives in Vulvovaginal Candidiasis Screening", "authors":[ "Kong, Yan", "Wang, Sheng", "Cai, Jiangdong", "Zhao, Zihao", "Shen, Zhenrong", "Li, Yonghao", "Fei, Manman", "Wang, Qian" ], "id":"Conference", "arxiv_id":"2405.09463", "GitHub":[ "https:\/\/github.com\/YanKong0408\/Gaze-DETR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":720 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2519_paper.pdf", "bibtext":"@InProceedings{ Hua_IOSSAM_MICCAI2024,\n author = { Huang, Xinrui and He, Dongming and Li, Zhenming and Zhang, Xiaofan and Wang, Xudong },\n title = { { IOSSAM: Label Efficient Multi-View Prompt-Driven Tooth Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Segmenting and labeling teeth from 3D Intraoral Scans (IOS) plays a significant role in digital dentistry. Dedicated learning-based methods have shown impressive results, while they suffer from expensive point-wise annotations. We aim at IOS segmentation with only low-cost 2D bounding-boxes annotations in the occlusal view. To accomplish this objective, we propose a SAM-based multi-view prompt-driven IOS segmentation method (IOSSAM) which learns prompts to utilize the pre-trained shape knowledge embedded in the visual foundation model SAM. Specifically, our method introduces an occlusal prompter trained on a dataset with weak annotations to generate category-related prompts for the occlusal view segmentation. We further develop a dental crown prompter to produce reasonable prompts for the dental crown view segmentation by considering the crown length prior and the generated occlusal view segmentation. We carefully design a novel view-aware label diffusion strategy to lift 2D segmentation to 3D field. 
We validate our method on a real IOS dataset, and the results show that our method outperforms recent weakly-supervised methods and is even comparable with fully-supervised methods.", "title":"IOSSAM: Label Efficient Multi-View Prompt-Driven Tooth Segmentation", "authors":[ "Huang, Xinrui", "He, Dongming", "Li, Zhenming", "Zhang, Xiaofan", "Wang, Xudong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ar-inspire\/IOSSAM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":721 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0458_paper.pdf", "bibtext":"@InProceedings{ Shi_Centerline_MICCAI2024,\n author = { Shi, Pengcheng and Hu, Jiesi and Yang, Yanwu and Gao, Zilve and Liu, Wei and Ma, Ting },\n title = { { Centerline Boundary Dice Loss for Vascular Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Vascular segmentation in medical imaging plays a crucial role in analysing morphological and functional assessments. Traditional methods, like the centerline Dice (clDice) loss, ensure topology preservation but falter in capturing geometric details, especially under translation and deformation. The combination of clDice with traditional Dice loss can lead to diameter imbalance, favoring larger vessels. Addressing these challenges, we introduce the centerline boundary Dice (cbDice) loss function, which harmonizes topological integrity and geometric nuances, ensuring consistent segmentation across various vessel sizes. cbDice enriches the clDice approach by including boundary-aware aspects, thereby improving geometric detail recognition. It matches the performance of the boundary difference over union (B-DoU) loss through a mask-distance-based approach, enhancing translation sensitivity. Crucially, cbDice incorporates radius information from vascular skeletons, enabling uniform adaptation to vascular diameter changes and maintaining balance in branch growth and fracture impacts. Furthermore, we conducted a theoretical analysis of clDice variants (cl-X-Dice). We validated cbDice\u2019s efficacy on three diverse vascular segmentation datasets, encompassing both 2D and 3D, and binary and multi-class segmentation. Particularly, the method integrated with cbDice demonstrated outstanding performance on the MICCAI 2023 TopCoW Challenge dataset.
Our code is made publicly available at: https:\/\/github.com\/PengchengShi1220\/cbDice.", "title":"Centerline Boundary Dice Loss for Vascular Segmentation", "authors":[ "Shi, Pengcheng", "Hu, Jiesi", "Yang, Yanwu", "Gao, Zilve", "Liu, Wei", "Ma, Ting" ], "id":"Conference", "arxiv_id":"2407.01517", "GitHub":[ "https:\/\/github.com\/PengchengShi1220\/cbDice" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":722 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1481_paper.pdf", "bibtext":"@InProceedings{ Caf_Two_MICCAI2024,\n author = { Cafaro, Alexandre and Dorent, Reuben and Haouchine, Nazim and Lepetit, Vincent and Paragios, Nikos and Wells III, William M. and Frisken, Sarah },\n title = { { Two Projections Suffice for Cerebral Vascular Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"3D reconstruction of cerebral vasculature from 2D biplanar projections could significantly improve diagnosis and treatment planning. We introduce a novel approach to tackle this challenging task by initially backprojecting the two projections, a process that traditionally results in unsatisfactory outcomes due to inherent ambiguities. To overcome this, we employ a U-Net approach trained to resolve these ambiguities, leading to significant improvement in reconstruction quality. The process is further refined using a Maximum A Posteriori strategy with a prior that favors continuity, leading to enhanced 3D reconstructions. We evaluated our approach using a comprehensive dataset comprising segmentations from approximately 700 MR angiography scans, from which we generated paired realistic biplanar DRRs. Upon testing with held-out data, our method achieved an 80% Dice similarity w.r.t the ground truth, superior to existing methods. Our code and dataset are available at https:\/\/github.com\/Wapity\/3DBrainXVascular.", "title":"Two Projections Suffice for Cerebral Vascular Reconstruction", "authors":[ "Cafaro, Alexandre", "Dorent, Reuben", "Haouchine, Nazim", "Lepetit, Vincent", "Paragios, Nikos", "Wells III, William M.", "Frisken, Sarah" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Wapity\/3DBrainXVascular" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":723 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3196_paper.pdf", "bibtext":"@InProceedings{ Bae_Conditional_MICCAI2024,\n author = { Bae, Juyoung and Tong, Elizabeth and Chen, Hao },\n title = { { Conditional Diffusion Model for Versatile Temporal Inpainting in 4D Cerebral CT Perfusion Imaging } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cerebral CT Perfusion (CTP) sequence imaging is a widely used modality for stroke assessment. 
While high temporal resolution of CT scans is crucial for accurate diagnosis, it correlates with increased radiation exposure. A promising solution is to generate synthetic CT scans to artificially enhance the temporal resolution of the sequence. We present a versatile CTP sequence inpainting model based on a conditional diffusion model, which can inpaint temporal gaps with synthetic scans to a fine 1-second interval, agnostic to both the duration of the gap and the sequence length. We achieve this by incorporating a carefully engineered conditioning scheme that exploits the intrinsic patterns of time-concentration dynamics. Our approach is much more flexible and clinically relevant compared to existing interpolation methods that either (1) lack such perfusion-specific guidance or (2) require all the known scans in the sequence, thereby imposing constraints on the length and acquisition interval. Such flexibility allows our model to be effectively applied to other tasks, such as repairing sequences with significant motion artifacts. Our model can generate accurate and realistic CT scans to inpaint gaps as wide as 8 seconds while achieving both perceptual quality and diagnostic information comparable to the ground-truth 1-second resolution sequence. Extensive experiments demonstrate the superiority of our model over prior art in numerous metrics and clinical applicability. Our code is available at https:\/\/github.com\/baejustin\/CTP_Inpainting_Diffusion.", "title":"Conditional Diffusion Model for Versatile Temporal Inpainting in 4D Cerebral CT Perfusion Imaging", "authors":[ "Bae, Juyoung", "Tong, Elizabeth", "Chen, Hao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/baejustin\/CTP_Inpainting_Diffusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":724 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3533_paper.pdf", "bibtext":"@InProceedings{ Swa_SAM_MICCAI2024,\n author = { Swain, Bishal R. and Cheoi, Kyung J. and Ko, Jaepil },\n title = { { SAM Guided Task-Specific Enhanced Nuclei Segmentation in Digital Pathology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cell nuclei segmentation is crucial in digital pathology for various diagnoses and treatments, which are prominently performed using semantic segmentation methods that focus on scalable receptive fields and multi-scale information. In such segmentation tasks, U-Net based task-specific encoders excel in capturing fine-grained information but fall short in integrating high-level global context. Conversely, foundation models inherently grasp coarse-level features but are not as proficient as task-specific models at providing fine-grained details. To this end, we propose utilizing the foundation model to guide the task-specific supervised learning by dynamically combining their global and local latent representations, via our proposed X-Gated Fusion Block, which uses a Gated squeeze and excitation block followed by Cross-attention to dynamically fuse latent representations.
Through our experiments across datasets and visualization analysis, we demonstrate that the integration of task-specific knowledge with general insights from foundational models can drastically increase performance, even outperforming domain-specific semantic segmentation models to achieve state-of-the-art results by increasing the Dice score and mIoU by approximately 12% and 17.22% on CryoNuSeg, 15.55% and 16.77% on NuInsSeg, and 9% on both metrics for the CoNIC dataset. Our code will be released at https:\/\/cvpr-kit.github.io\/SAM-Guided-Enhanced-Nuclei-Segmentation\/.", "title":"SAM Guided Task-Specific Enhanced Nuclei Segmentation in Digital Pathology", "authors":[ "Swain, Bishal R.", "Cheoi, Kyung J.", "Ko, Jaepil" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":725 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1255_paper.pdf", "bibtext":"@InProceedings{ Li_Multicategory_MICCAI2024,\n author = { Li, Dongzhe and Yang, Baoyao and Zhan, Weide and He, Xiaochen },\n title = { { Multi-category Graph Reasoning for Multi-modal Brain Tumor Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Many multi-modal tumor segmentation methods have been proposed to localize diseased areas from the brain images, facilitating the intelligence of diagnosis. However, existing studies commonly ignore the relationship between multiple categories in brain tumor segmentation, leading to irrational tumor area distribution in the predictive results. To address this issue, this work proposes a Multi-category Region-guided Graph Reasoning Network, which models the dependency between multiple categories using a Multi-category Interaction Module (TMIM), thus enabling more accurate subregion localization of brain tumors. To improve the recognition of tumors\u2019 blurred boundaries, a Region-guided Reasoning Module is also incorporated into the network, which captures semantic relationships between regions and contours via graph reasoning. In addition, we introduce a shared cross-attention encoder in the feature extraction stage to facilitate the comprehensive utilization of multi-modal information. 
Experimental results on the BraTS2019 and BraTS2020 datasets demonstrate that our method outperforms the current state-of-the-art methods.", "title":"Multi-category Graph Reasoning for Multi-modal Brain Tumor Segmentation", "authors":[ "Li, Dongzhe", "Yang, Baoyao", "Zhan, Weide", "He, Xiaochen" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":726 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0920_paper.pdf", "bibtext":"@InProceedings{ Tah_Enhancing_MICCAI2024,\n author = { Tahghighi, Peyman and Zhang, Yunyan and Souza, Roberto and Komeili, Amin },\n title = { { Enhancing New Multiple Sclerosis Lesion Segmentation via Self-supervised Pre-training and Synthetic Lesion Integration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multiple Sclerosis (MS) is a chronic and severe inflammatory disease of the central nervous system. In MS, the myelin sheath covering nerve fibres is attacked by the self-immune system, leading to communication issues between the brain and the rest of the body. Image-based biomarkers, such as lesions seen with Magnetic Resonance Imaging (MRI), are essential in MS diagnosis and monitoring. Further, detecting newly formed lesions provides crucial information for assessing disease progression and treatment outcomes. However, annotating changes between MRI scans is time-consuming and subject to inter-expert variability. Methods proposed for new lesion segmentation have utilized limited data available for training the model, failing to harness the full capacity of the models and resulting in limited generalizability. To enhance the performance of the new MS lesion segmentation model, we propose a self-supervised pre-training scheme based on image masking that is used to initialize the weights of the model, which then is trained for the new lesion segmentation task using a mix of real and synthetic data created by a synthetic lesion data augmentation method that we propose. Experiments on the MSSEG-2 challenge dataset demonstrate that utilizing self-supervised pre-training and adding synthetic lesions during training improves the model\u2019s performance. We achieved a Dice score of 56.15\u00b17.06% and an F1 score of 56.69\u00b19.12%, which is 2.06% points and 3.3% higher, respectively, than the previous best existing method. 
Code is available at: https:\/\/github.com\/PeymanTahghighi\/SSLMRI.", "title":"Enhancing New Multiple Sclerosis Lesion Segmentation via Self-supervised Pre-training and Synthetic Lesion Integration", "authors":[ "Tahghighi, Peyman", "Zhang, Yunyan", "Souza, Roberto", "Komeili, Amin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/PeymanTahghighi\/SSLMRI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":727 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3074_paper.pdf", "bibtext":"@InProceedings{ Zhu_Symptom_MICCAI2024,\n author = { Zhu, Ye and Xu, Jingwen and Lyu, Fei and Yuen, Pong C. },\n title = { { Symptom Disentanglement in Chest X-ray Images for Fine-Grained Progression Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Chest radiography is a commonly used diagnostic imaging exam for monitoring disease severity. Machine learning has made significant strides in static tasks (e.g., segmentation or diagnosis) based on a single medical image. However, disease progression monitoring based on longitudinal images remains fairly underexplored, despite providing informative clues for early prognosis and timely intervention. In practice, the development of the underlying disease is typically accompanied by the occurrence and changes of multiple specific symptoms. Inspired by this, we propose a multi-stage framework to model the complex progression from a symptom perspective. Specifically, we introduce two consecutive modules, namely Symptom Disentangler (SD) and Symptom Progression Learner (SPL), to learn from static diagnosis to dynamic disease development. By explicitly extracting the symptom-specific features from a pair of chest radiographs using a set of learnable symptom-aware embeddings in the SD module, the SPL module can leverage these features for obtaining the symptom progression features, which will be utilized for the final progression prediction. Experimental results on the public dataset Chest ImaGenome show superior performance compared to the current state-of-the-art method.", "title":"Symptom Disentanglement in Chest X-ray Images for Fine-Grained Progression Learning", "authors":[ "Zhu, Ye", "Xu, Jingwen", "Lyu, Fei", "Yuen, Pong C."
], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/zhuye98\/SDPL.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":728 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3979_paper.pdf", "bibtext":"@InProceedings{ Wan_SAMMed3DMoE_MICCAI2024,\n author = { Wang, Guoan and Ye, Jin and Cheng, Junlong and Li, Tianbin and Chen, Zhaolin and Cai, Jianfei and He, Junjun and Zhuang, Bohan },\n title = { { SAM-Med3D-MoE: Towards a Non-Forgetting Segment Anything Model via Mixture of Experts for 3D Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Volumetric medical image segmentation is pivotal in enhancing disease diagnosis, treatment planning, and advancing medical research. While existing volumetric foundation models for medical image segmentation, such as SAM-Med3D and SegVol, have shown remarkable performance on general organs and tumors, their ability to segment certain categories in clinical downstream tasks remains limited. Supervised Finetuning (SFT) serves as an effective way to adapt such foundation models for task-specific downstream tasks and achieve remarkable performance in those tasks. However, it would inadvertently degrade the general knowledge previously stored in the original foundation model. In this paper, we propose SAM-Med3D-MoE, a novel framework that seamlessly integrates task-specific finetuned models with the foundational model, creating a unified model at minimal additional training expense for an extra gating network. This gating network, in conjunction with a selection strategy, allows the unified model to achieve comparable performance of the original models in their respective tasks \u2014 both general and specialized \u2014 without updating any parameters of them. Our comprehensive experiments demonstrate the efficacy of SAM-Med3D-MoE, with an average Dice performance increase from 53.2\\% to 56.4\\% on 15 specific classes. It especially gets remarkable gains of 29.6\\%, 8.5\\%, 11.2\\% on the spinal cord, esophagus, and right hip, respectively. Additionally, it achieves 48.9\\% Dice on the challenging SPPIN2023 Challenge, significantly surpassing the general expert\u2019s performance of 32.3\\%. We anticipate that SAM-Med3D-MoE can serve as a new framework for adapting the foundation model to specific areas in medical image analysis. 
Codes and datasets will be publicly available.", "title":"SAM-Med3D-MoE: Towards a Non-Forgetting Segment Anything Model via Mixture of Experts for 3D Medical Image Segmentation", "authors":[ "Wang, Guoan", "Ye, Jin", "Cheng, Junlong", "Li, Tianbin", "Chen, Zhaolin", "Cai, Jianfei", "He, Junjun", "Zhuang, Bohan" ], "id":"Conference", "arxiv_id":"2407.04938", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":729 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1208_paper.pdf", "bibtext":"@InProceedings{ Zha_Synchronous_MICCAI2024,\n author = { Zhang, Jianhai and Wan, Tonghua and MacDonald, M. Ethan and Menon, Bijoy K. and Qiu, Wu and Ganesh, Aravind },\n title = { { Synchronous Image-Label Diffusion with Anisotropic Noise for Stroke Lesion Segmentation on Non-contrast CT } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Automated segmentation of stroke lesions on non-contrast CT (NCCT) images is essential for efficient diagnosis of stroke patients. Although diffusion probabilistic models have shown promising advancements across various fields, their application to medical imaging exposes limitations due to the use of conventional isotropic Gaussian noise. Isotropic Gaussian noise overlooks the structural information and strong voxel dependencies in medical images. In this paper, a novel framework employing synchronous diffusion processes on image-labels is introduced, combined with a sampling strategy for anisotropic noise, to improve stroke lesion segmentation performance on NCCT. Our method acknowledges the significance of anatomical information during diffusion, contrasting with the traditional diffusion processes that assume isotropic Gaussian noise added to voxels independently. By integrating correlations among image voxels within specific anatomical regions into the denoising process, our approach enhances the robustness of neural networks, resulting in improved accuracy in stroke lesion segmentation.\nThe proposed method has been evaluated on two datasets where experimental results demonstrate the capability of the proposed method to accurately segment ischemic infarcts on NCCT images. Furthermore, comparative analysis against state-of-the-art models, including U-net, transformer, and DPM-based segmentation methods, highlights the advantages of our method in terms of segmentation metrics. The code is publicly available at https:\/\/github.com\/zhangjianhai\/SADPM.", "title":"Synchronous Image-Label Diffusion with Anisotropic Noise for Stroke Lesion Segmentation on Non-contrast CT", "authors":[ "Zhang, Jianhai", "Wan, Tonghua", "MacDonald, M. 
Ethan", "Menon, Bijoy K.", "Qiu, Wu", "Ganesh, Aravind" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":730 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0078_paper.pdf", "bibtext":"@InProceedings{ Fan_DiffExplainer_MICCAI2024,\n author = { Fang, Yingying and Wu, Shuang and Jin, Zihao and Wang, Shiyi and Xu, Caiwen and Walsh, Simon and Yang, Guang },\n title = { { DiffExplainer: Unveiling Black Box Models Via Counterfactual Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the field of medical imaging, particularly in tasks related to early disease detection and prognosis, understanding the reasoning behind AI model predictions is imperative for assessing their reliability. Conventional explanation methods encounter challenges in identifying decisive features in medical image classifications, especially when discriminative features are subtle or not immediately evident. To address this limitation, we propose an agent model capable of generating counterfactual images that prompt different decisions when plugged into a black box model. By employing this agent model, we can uncover influential image patterns that impact the black model\u2019s final predictions. Through our methodology, we efficiently identify features that influence decisions of the deep black box. We validated our approach in the rigorous domain of medical prognosis tasks, showcasing its efficacy and potential to enhance the reliability of deep learning models in medical image classification compared to existing interpretation methods. The code is available at: \\url{https:\/\/github.com\/ayanglab\/DiffExplainer}.", "title":"DiffExplainer: Unveiling Black Box Models Via Counterfactual Generation", "authors":[ "Fang, Yingying", "Wu, Shuang", "Jin, Zihao", "Wang, Shiyi", "Xu, Caiwen", "Walsh, Simon", "Yang, Guang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ayanglab\/DiffExplainer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":731 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0494_paper.pdf", "bibtext":"@InProceedings{ Cai_Classaware_MICCAI2024,\n author = { Cai, Zhuotong and Xin, Jingmin and Zeng, Tianyi and Dong, Siyuan and Zheng, Nanning and Duncan, James S. 
},\n title = { { Class-aware Mutual Mixup with Triple Alignments for Semi-Supervised Cross-domain Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised cross-domain segmentation, also referred to as Semi-supervised domain adaptation (SSDA), aims to bridge the domain gap and enhance model performance on the target domain with the limited availability of labeled target samples, lots of unlabeled target samples, and a substantial amount of labeled source samples. However, current SSDA approaches still face challenges in attaining consistent alignment across domains and adequately addressing the segmentation performance for the tail class. In this work, we develop class-aware mutual mixup with triple alignments (CMMTA) for semi-supervised cross-domain segmentation. Specifically, we first propose a class-aware mutual mixup strategy to obtain the maximal diversification of data distribution and enable the model to focus on the tail class. Then, we incorporate our class-aware mutual mixup across three distinct pathways to establish a triple consistent alignment. We further introduce cross knowledge distillation (CKD) with two parallel mean-teacher models for intra-domain and inter-domain alignment, respectively. Experimental results on two public cardiac datasets MM-WHS and MS-CMRSeg demonstrate the superiority of our proposed approach against other state-of-the-art methods under two SSDA settings.", "title":"Class-aware Mutual Mixup with Triple Alignments for Semi-Supervised Cross-domain Segmentation", "authors":[ "Cai, Zhuotong", "Xin, Jingmin", "Zeng, Tianyi", "Dong, Siyuan", "Zheng, Nanning", "Duncan, James S." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":732 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1332_paper.pdf", "bibtext":"@InProceedings{ Bud_Transferring_MICCAI2024,\n author = { Budd, Charlie and Vercauteren, Tom },\n title = { { Transferring Relative Monocular Depth to Surgical Vision with Temporal Consistency } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Relative monocular depth, inferring depth correct to a shift and scale from a single image, is an active research topic. Recent deep learning models, trained on large and varied meta-datasets, now provide excellent performance in the domain of natural images. However, few datasets exist which provide ground truth depth for endoscopic images, making training such models from scratch unfeasible. This work investigates the transfer of these models into the surgical domain, and presents an effective and simple way to improve on standard supervision through the use of temporal consistency self-supervision. We show temporal consistency significantly improves supervised training alone when transferring to the low-data regime of endoscopy, and outperforms the prevalent self-supervision technique for this task. 
In addition we show our method drastically outperforms the state-of-the-art method from within the domain of endoscopy. We also release our code, models, and ensembled meta-dataset, Meta-MED, establishing a strong benchmark for future work.", "title":"Transferring Relative Monocular Depth to Surgical Vision with Temporal Consistency", "authors":[ "Budd, Charlie", "Vercauteren, Tom" ], "id":"Conference", "arxiv_id":"2403.06683", "GitHub":[ "https:\/\/github.com\/charliebudd\/transferring-relative-monocular-depth-to-surgical-vision" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":733 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2689_paper.pdf", "bibtext":"@InProceedings{ Cha_ANovel_MICCAI2024,\n author = { Chai, Shurong and Jain, Rahul K. and Mo, Shaocong and Liu, Jiaqing and Yang, Yulin and Li, Yinhao and Tateyama, Tomoko and Lin, Lanfen and Chen, Yen-Wei },\n title = { { A Novel Adaptive Hypergraph Neural Network for Enhancing Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical image segmentation is crucial in the field of medical imaging, assisting healthcare professionals in analyzing images and improving diagnostic performance. Recent advancements in Transformer-based networks, which utilize self-attention mechanism, have proven their effectiveness in various medical problems, including medical imaging. However, existing self-attention mechanism in Transformers only captures pairwise correlations among image patches, neglecting non-pairwise correlations that are essential for performance enhancement. On the other hand, recently, graph-based networks have emerged to capture both pairwise and non-pairwise correlations effectively. Inspired by recent Hypergraph Neural Network (HGNN), we propose a novel hypergraph-based network for medical image segmentation. Our contribution lies in formulating novel and efficient HGNN methods for constructing Hyperedges. To effectively aggregate multiple patches with similar attributes at both feature and local levels, we introduce an improved adaptive technique leveraging the K-Nearest Neighbors (KNN) algorithm to enhance the hypergraph construction process. Additionally, we generalize the concept of Convolutional Neural Networks (CNNs) to hypergraphs. Our method achieves state-of-the-art results on two publicly available segmentation datasets, and visualization results further validate its effectiveness. 
Our code is released on Github: https:\/\/github.com\/11yxk\/AHGNN.", "title":"A Novel Adaptive Hypergraph Neural Network for Enhancing Medical Image Segmentation", "authors":[ "Chai, Shurong", "Jain, Rahul K.", "Mo, Shaocong", "Liu, Jiaqing", "Yang, Yulin", "Li, Yinhao", "Tateyama, Tomoko", "Lin, Lanfen", "Chen, Yen-Wei" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/11yxk\/AHGNN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":734 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1768_paper.pdf", "bibtext":"@InProceedings{ Liu_Structural_MICCAI2024,\n author = { Liu, Kang and Ma, Zhuoqi and Kang, Xiaolu and Zhong, Zhusi and Jiao, Zhicheng and Baird, Grayson and Bai, Harrison and Miao, Qiguang },\n title = { { Structural Entities Extraction and Patient Indications Incorporation for Chest X-ray Report Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"The automated generation of imaging reports proves invaluable in alleviating the workload of radiologists. A clinically applicable reports generation algorithm should demonstrate its effectiveness in producing reports that accurately describe radiology findings and attend to patient-specific indications. In this paper, we introduce a novel method, Structural Entities extraction and patient indications Incorporation (SEI) for chest X-ray report generation. Specifically, we employ a structural entities extraction (SEE) approach to eliminate presentation-style vocabulary in reports and improve the quality of factual entity sequences. This reduces the noise in the following cross-modal alignment module by aligning X-ray images with factual entity sequences in reports, thereby enhancing the precision of cross-modal alignment and further aiding the model in gradient-free retrieval of similar historical cases. Subsequently, we propose a cross-modal fusion network to integrate information from X-ray images, similar historical cases, and patient-specific indications. This process allows the text decoder to attend to discriminative features of X-ray images, assimilate historical diagnostic information from similar cases, and understand the examination intention of patients. This, in turn, assists in triggering the text decoder to produce high-quality reports. Experiments conducted on MIMIC-CXR validate the superiority of SEI over state-of-the-art approaches on both natural language generation and clinical efficacy metrics. 
The code is available at https:\/\/github.com\/mk-runner\/SEI.", "title":"Structural Entities Extraction and Patient Indications Incorporation for Chest X-ray Report Generation", "authors":[ "Liu, Kang", "Ma, Zhuoqi", "Kang, Xiaolu", "Zhong, Zhusi", "Jiao, Zhicheng", "Baird, Grayson", "Bai, Harrison", "Miao, Qiguang" ], "id":"Conference", "arxiv_id":"2405.14905", "GitHub":[ "https:\/\/github.com\/mk-runner\/SEI" ], "paper_page":"https:\/\/huggingface.co\/papers\/2405.14905", "n_linked_authors":0, "upvotes":0, "num_comments":0, "n_authors":8, "Models":[ "MK-runner\/SEI" ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ "MK-runner\/SEI" ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":1, "type":"Poster", "unique_id":735 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3843_paper.pdf", "bibtext":"@InProceedings{ Has_SemiSupervised_MICCAI2024,\n author = { Hasan, Mahmudul and Hu, Xiaoling and Abousamra, Shahira and Prasanna, Prateek and Saltz, Joel and Chen, Chao },\n title = { { Semi-Supervised Contrastive VAE for Disentanglement of Digital Pathology Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Despite the strong prediction power of deep learning models, their interpretability remains an important concern. Disentanglement models increase interpretability by decomposing the latent space into interpretable subspaces. In this paper, we propose the first disentanglement method for pathology images. We focus on the task of detecting tumor-infiltrating lymphocytes (TIL). We propose different ideas including cascading disentanglement, novel architecture and reconstruction branches. We achieve superior performance on complex pathology images, thus improving the interpretability and even generalization power of TIL detection deep learning models.", "title":"Semi-Supervised Contrastive VAE for Disentanglement of Digital Pathology Images", "authors":[ "Hasan, Mahmudul", "Hu, Xiaoling", "Abousamra, Shahira", "Prasanna, Prateek", "Saltz, Joel", "Chen, Chao" ], "id":"Conference", "arxiv_id":"2410.02012", "GitHub":[ "https:\/\/github.com\/Shauqi\/SS-cVAE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":736 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3024_paper.pdf", "bibtext":"@InProceedings{ Zhe_Reducing_MICCAI2024,\n author = { Zheng, Zixuan and Shi, Yilei and Li, Chunlei and Hu, Jingliang and Zhu, Xiao Xiang and Mou, Lichao },\n title = { { Reducing Annotation Burden: Exploiting Image Knowledge for Few-Shot Medical Video Object Segmentation via Spatiotemporal Consistency Relearning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Few-shot video object segmentation aims to reduce annotation costs; however, existing methods still require abundant dense frame annotations for training, which are scarce in the medical domain. 
We investigate an extremely low-data regime that utilizes annotations from only a few video frames and leverages existing labeled images to minimize costly video annotations. Specifically, we propose a two-phase framework. First, we learn a few-shot segmentation model using labeled images. Subsequently, to improve performance without full supervision, we introduce a spatiotemporal consistency relearning approach on medical videos that enforces consistency between consecutive frames. Constraints are also enforced between the image model and relearning model at both feature and prediction levels. Experiments demonstrate the superiority of our approach over state-of-the-art few-shot segmentation methods. Our model bridges the gap between abundant annotated medical images and scarce, sparsely labeled medical videos to achieve strong video segmentation performance in this low data regime. Code is available at https:\/\/github.com\/MedAITech\/RAB.", "title":"Reducing Annotation Burden: Exploiting Image Knowledge for Few-Shot Medical Video Object Segmentation via Spatiotemporal Consistency Relearning", "authors":[ "Zheng, Zixuan", "Shi, Yilei", "Li, Chunlei", "Hu, Jingliang", "Zhu, Xiao Xiang", "Mou, Lichao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MedAITech\/RAB" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":737 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0605_paper.pdf", "bibtext":"@InProceedings{ Ada_Physics_MICCAI2024,\n author = { Adams-Tew, Samuel I. and Od\u00e9en, Henrik and Parker, Dennis L. and Cheng, Cheng-Chieh and Madore, Bruno and Payne, Allison and Joshi, Sarang },\n title = { { Physics informed neural networks for estimation of tissue properties from multi-echo configuration state MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"This work investigates the use of configuration state imaging together with deep neural networks to develop quantitative MRI techniques for deployment in an interventional setting. A physics modeling technique for inhomogeneous fields and heterogeneous tissues is presented and used to evaluate the theoretical capability of neural networks to estimate parameter maps from configuration state signal data. All tested normalization strategies achieved similar performance in estimating T2 and T2*. Varying network architecture and data normalization had substantial impacts on estimated flip angle and T1, highlighting their importance in developing neural networks to solve these inverse problems. 
The developed signal modeling technique provides an environment that will enable the development and evaluation of physics-informed machine learning techniques for MR parameter mapping and facilitate the development of quantitative MRI techniques to inform clinical decisions during MR-guided treatments.", "title":"Physics informed neural networks for estimation of tissue properties from multi-echo configuration state MRI", "authors":[ "Adams-Tew, Samuel I.", "Od\u00e9en, Henrik", "Parker, Dennis L.", "Cheng, Cheng-Chieh", "Madore, Bruno", "Payne, Allison", "Joshi, Sarang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/fuslab-uofu\/mri-signal-model" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":738 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3702_paper.pdf", "bibtext":"@InProceedings{ Uro_Knowledgegrounded_MICCAI2024,\n author = { Urooj Khan, Aisha and Garrett, John and Bradshaw, Tyler and Salkowski, Lonie and Jeong, Jiwoong and Tariq, Amara and Banerjee, Imon },\n title = { { Knowledge-grounded Adaptation Strategy for Vision-language Models: Building a Unique Case-set for Screening Mammograms for Residents Training } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"A visual-language model (VLM) pre-trained on natural images and text pairs poses a significant barrier when applied to medical contexts due to domain shift. Yet, adapting or fine-tuning these VLMs for medical use presents considerable hurdles, including domain misalignment, limited access to extensive datasets, and high-class imbalances. Hence, there is a pressing need for strategies to effectively adapt these VLMs to the medical domain, as such adaptations would prove immensely valuable in healthcare applications. In this study, we propose a framework designed to adeptly tailor VLMs to the medical domain, employing selective sampling and hard-negative mining techniques for enhanced performance in retrieval tasks. \nWe validate the efficacy of our proposed approach by implementing it across two distinct VLMs: the in-domain VLM (MedCLIP) and out-of-domain VLMs (ALBEF).\nWe assess the performance of these models both in their original off-the-shelf state and after undergoing our proposed training strategies, using two extensive datasets containing mammograms and their corresponding reports. Our evaluation spans zero-shot, few-shot, and supervised scenarios. 
Through our approach, we observe a notable enhancement in Recall@K performance for the image-text retrieval task.", "title":"Knowledge-grounded Adaptation Strategy for Vision-language Models: Building a Unique Case-set for Screening Mammograms for Residents Training", "authors":[ "Urooj Khan, Aisha", "Garrett, John", "Bradshaw, Tyler", "Salkowski, Lonie", "Jeong, Jiwoong", "Tariq, Amara", "Banerjee, Imon" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/aurooj\/VLM_SS.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":739 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0563_paper.pdf", "bibtext":"@InProceedings{ Xie_pFLFE_MICCAI2024,\n author = { Xie, Luyuan and Lin, Manqing and Liu, Siyuan and Xu, ChenMing and Luan, Tianyu and Li, Cong and Fang, Yuejian and Shen, Qingni and Wu, Zhonghai },\n title = { { pFLFE: Cross-silo Personalized Federated Learning via Feature Enhancement on Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"In medical image segmentation, personalized cross-silo federated learning (FL) is becoming popular for utilizing varied data across healthcare settings to overcome data scarcity and privacy concerns. However, existing methods often suffer from client drift, leading to inconsistent performance and delayed training. We propose a new framework, Personalized Federated Learning via Feature Enhancement (pFLFE), designed to mitigate these challenges. pFLFE consists of two main stages: feature enhancement and supervised learning. The first stage improves differentiation between foreground and background features, and the second uses these enhanced features for learning from segmentation masks. We also design an alternative training approach that requires fewer communication rounds without compromising segmentation quality, even with limited communication resources. 
Through experiments on three medical segmentation tasks, we demonstrate that pFLFE outperforms the state-of-the-art methods.", "title":"pFLFE: Cross-silo Personalized Federated Learning via Feature Enhancement on Medical Image Segmentation", "authors":[ "Xie, Luyuan", "Lin, Manqing", "Liu, Siyuan", "Xu, ChenMing", "Luan, Tianyu", "Li, Cong", "Fang, Yuejian", "Shen, Qingni", "Wu, Zhonghai" ], "id":"Conference", "arxiv_id":"2407.00462", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":740 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1246_paper.pdf", "bibtext":"@InProceedings{ Cai_Masked_MICCAI2024,\n author = { Cai, Yuxin and Zhang, Jianhai and He, Lei and Ganesh, Aravind and Qiu, Wu },\n title = { { Masked Residual Diffusion Probabilistic Model with Regional Asymmetry Prior for Generating Perfusion Maps from Multi-phase CTA } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multiphase CT angiography (mCTA) has become an important diagnostic tool for acute ischemic stroke (AIS), offering insights into occlusion sites and collateral circulation. However, its broader application is hindered by the need for specialized interpretation, contrasting with the intuitive nature of CT perfusion (CTP). In this work, we propose a novel diffusion based generative model to generate CTP-like perfusion maps, enhancing AIS diagnosis in resource-limited settings. Unlike traditional diffusion models that restore images by predicting the added noise, our approach uses a masked residual diffusion probabilistic model (MRDPM) to recover the residuals between the predicted and target image within brain regions of interests for more detailed generation. To target denoising efforts on relevant regions, noise is selectively added into the brain area only during diffusion. Furthermore, a Multi-scale Asymmetry Prior module and a Brain Region-Aware Network are proposed to incorporate anatomical prior information into the MRDPM to generate finer details while ensuring consistency. Experimental evaluations with 514 patient images demonstrate that our proposed method is able to generate high quality CTP-like perfusion maps, outperforming several other generative models regarding the metrics of MAE, LPIPS, SSIM, and PSNR. 
The code is publicly available at https:\/\/github.com\/UniversalCAI\/MRDPM-with-RAP.", "title":"Masked Residual Diffusion Probabilistic Model with Regional Asymmetry Prior for Generating Perfusion Maps from Multi-phase CTA", "authors":[ "Cai, Yuxin", "Zhang, Jianhai", "He, Lei", "Ganesh, Aravind", "Qiu, Wu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/UniversalCAI\/MRDPM-with-RAP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":741 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2957_paper.pdf", "bibtext":"@InProceedings{ Lu_Spot_MICCAI2024,\n author = { Lu, Zilin and Xie, Yutong and Zeng, Qingjie and Lu, Mengkang and Wu, Qi and Xia, Yong },\n title = { { Spot the Difference: Difference Visual Question Answering with Residual Alignment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Difference Visual Question Answering (DiffVQA) introduces a new task aimed at understanding and responding to questions regarding the disparities observed between two images. Unlike traditional medical VQA tasks, DiffVQA closely mirrors the diagnostic procedures of radiologists, who frequently conduct longitudinal comparisons of images taken at different time points for a given patient. This task accentuates the discrepancies between images captured at distinct temporal intervals. To better address the variations, this paper proposes a novel Residual Alignment model (ReAl) tailored for DiffVQA. ReAl is designed to produce flexible and accurate answers by analyzing the discrepancies in chest X-ray images of the same patient across different time points. Compared to the previous method, ReAl additionally adds a residual input branch, where the residual of two images is fed into this branch. Additionally, a Residual Feature Alignment (RFA) module is introduced to ensure that ReAl effectively captures and learns the disparities between corresponding images. Experimental evaluations conducted on the MIMIC-Diff-VQA dataset demonstrate the superiority of ReAl over previous state-of-the-art methods, consistently achieving better performance. Ablation experiments further validate the effectiveness of the RFA module in enhancing the model\u2019s attention to differences. 
The code implementation of the proposed approach will be made available.", "title":"Spot the Difference: Difference Visual Question Answering with Residual Alignment", "authors":[ "Lu, Zilin", "Xie, Yutong", "Zeng, Qingjie", "Lu, Mengkang", "Wu, Qi", "Xia, Yong" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":742 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1756_paper.pdf", "bibtext":"@InProceedings{ Qiu_Towards_MICCAI2024,\n author = { Qiu, Xinmei and Wang, Fan and Sun, Yongheng and Lian, Chunfeng and Ma, Jianhua },\n title = { { Towards Graph Neural Networks with Domain-Generalizable Explainability for fMRI-Based Brain Disorder Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Graph neural networks (GNNs) represent a cutting-edge methodology in diagnosing brain disorders via fMRI data. Explainability and generalizability are two critical issues of GNNs for fMRI-based diagnoses, considering the high complexity of functional brain networks and the strong variations in fMRI data across different clinical centers. Although there have been many studies on GNNs\u2019 explainability and generalizability, yet few have addressed both aspects simultaneously. In this paper, we unify these two issues and revisit the domain generalization (DG) of fMRI-based diagnoses from the view of explainability. That is, we aim to learn domain-generalizable explanation factors to enhance center-agnostic graph representation learning and therefore brain disorder diagnoses. To this end, a specialized meta-learning framework coupled with explainability-generalizable (XG) regularizations is designed to learn diagnostic GNN models (termed XG-GNN) from fMRI BOLD signals. Our XG-GNN features the ability to build nonlinear functional networks in a task-oriented fashion. More importantly, the group-wise differences of such learned individual networks can be stably captured and maintained to unseen fMRI centers to jointly boost the DG of diagnostic explainability and accuracy. Experimental results on the ABIDE dataset demonstrate the effectiveness of our XG-GNN. Our source code will be publicly released.", "title":"Towards Graph Neural Networks with Domain-Generalizable Explainability for fMRI-Based Brain Disorder Diagnosis", "authors":[ "Qiu, Xinmei", "Wang, Fan", "Sun, Yongheng", "Lian, Chunfeng", "Ma, Jianhua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":743 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1074_paper.pdf", "bibtext":"@InProceedings{ Xu_DiRecT_MICCAI2024,\n author = { Xu, Xuanang and Lee, Jungwook and Lampen, Nathan and Kim, Daeseung and Kuang, Tianshu and Deng, Hannah H. and Liebschner, Michael A. K. 
and Gateno, Jaime and Yan, Pingkun },\n title = { { DiRecT: Diagnosis and Reconstruction Transformer for Mandibular Deformity Assessment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the realm of orthognathic surgical planning, the precision of mandibular deformity diagnosis is paramount to ensure favorable treatment outcomes. Traditional methods, reliant on the meticulous identification of bony landmarks via radiographic imaging techniques such as cone beam computed tomography (CBCT), are both resource-intensive and costly. In this paper, we present a novel way to diagnose mandibular deformities in which we harness facial landmarks detectable by off-the-shelf generic models, thus eliminating the necessity for bony landmark identification. We propose the Diagnosis-Reconstruction Transformer (DiRecT), an advanced network that exploits the automatically detected 3D facial landmarks to assess mandibular deformities. DiRecT\u2019s training is augmented with an auxiliary task of landmark reconstruction and is further enhanced by a teacher-student semi-supervised learning framework, enabling effective utilization of both labeled and unlabeled data to learn discriminative representations. Our study encompassed a comprehensive set of experiments utilizing an in-house clinical dataset of 101 subjects, alongside a public non-medical dataset of 1,519 subjects. The experimental results illustrate that our method markedly streamlines the mandibular deformity diagnostic workflow and exhibits promising diagnostic performance when compared with the baseline methods, which demonstrates DiRecT\u2019s potential as an alternative to conventional diagnostic protocols in the field of orthognathic surgery. Source code is publicly available at https:\/\/github.com\/RPIDIAL\/DiRecT.", "title":"DiRecT: Diagnosis and Reconstruction Transformer for Mandibular Deformity Assessment", "authors":[ "Xu, Xuanang", "Lee, Jungwook", "Lampen, Nathan", "Kim, Daeseung", "Kuang, Tianshu", "Deng, Hannah H.", "Liebschner, Michael A. K.", "Gateno, Jaime", "Yan, Pingkun" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/RPIDIAL\/DiRecT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":744 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1885_paper.pdf", "bibtext":"@InProceedings{ Son_Progressive_MICCAI2024,\n author = { Son, Moo Hyun and Bae, Juyoung and Tong, Elizabeth and Chen, Hao },\n title = { { Progressive Knowledge Distillation for Automatic Perfusion Parameter Maps Generation from Low Temporal Resolution CT Perfusion Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Perfusion Parameter Maps (PPMs), generated from Computer Tomography Perfusion (CTP) scans, deliver detailed measurements of cerebral blood flow and volume, crucial for the early identification and strategic treatment of cerebrovascular diseases. 
However, the acquisition of PPMs involves significant challenges. Firstly, the accuracy of these maps heavily relies on the manual selection of Arterial Input Function (AIF) information. Secondly, patients are subjected to considerable radiation exposure during the scanning process. In response, previous studies have attempted to automate AIF selection and reduce radiation exposure of CTP by lowering temporal resolution, utilizing deep learning to predict PPMs from automated AIF selection and temporal resolutions as low as 1\/3. However, the effectiveness of these approaches remains marginally significant. In this paper, we push the limits and propose a novel framework, Progressive Knowledge Distillation (PKD), to generate accurate PPMs from 1\/16 standard temporal resolution CTP scans. PKD uses a series of teacher networks, each trained on different temporal resolutions, for knowledge distillation. Initially, the student network learns from a teacher with low temporal resolution; as the student is trained, the teacher is scaled to a higher temporal resolution. This progressive approach aims to reduce the large initial knowledge gap between the teacher and the student. Experimental results demonstrate that PKD can generate PPMs comparable to full-resolution ground truth, outperforming current deep learning frameworks.", "title":"Progressive Knowledge Distillation for Automatic Perfusion Parameter Maps Generation from Low Temporal Resolution CT Perfusion Images", "authors":[ "Son, Moo Hyun", "Bae, Juyoung", "Tong, Elizabeth", "Chen, Hao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/mhson-kyle\/progressive-kd" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":745 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3209_paper.pdf", "bibtext":"@InProceedings{ Rid_HuLP_MICCAI2024,\n author = { Ridzuan, Muhammad and Shaaban, Mai A. and Saeed, Numan and Sobirov, Ikboljon and Yaqub, Mohammad },\n title = { { HuLP: Human-in-the-Loop for Prognosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"This paper introduces HuLP, a Human-in-the-Loop for Prognosis model designed to enhance the reliability and interpretability of prognostic models in clinical contexts, especially when faced with the complexities of missing covariates and outcomes. HuLP offers an innovative approach that enables human expert intervention, empowering clinicians to interact with and correct models\u2019 predictions, thus fostering collaboration between humans and AI models to produce more accurate prognoses. Additionally, HuLP addresses the challenges of missing data by utilizing neural networks and providing a tailored methodology that effectively handles missing data. Traditional methods often struggle to capture the nuanced variations within patient populations, leading to compromised prognostic predictions. HuLP imputes missing covariates based on imaging features, aligning more closely with clinician workflows and enhancing reliability. 
We conduct our experiments on two real-world, publicly available medical datasets to demonstrate the superiority and competitiveness of HuLP. Our code is available at https:\/\/github.com\/BioMedIA-MBZUAI\/HuLP.", "title":"HuLP: Human-in-the-Loop for Prognosis", "authors":[ "Ridzuan, Muhammad", "Shaaban, Mai A.", "Saeed, Numan", "Sobirov, Ikboljon", "Yaqub, Mohammad" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/BioMedIA-MBZUAI\/HuLP" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":746 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3725_paper.pdf", "bibtext":"@InProceedings{ Yua_Longitudinally_MICCAI2024,\n author = { Yuan, Xinrui and Cheng, Jiale and Hu, Dan and Wu, Zhengwang and Wang, Li and Lin, Weili and Li, Gang },\n title = { { Longitudinally Consistent Individualized Prediction of Infant Cortical Morphological Development } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Neurodevelopment is exceptionally dynamic and critical during infancy, as many neurodevelopmental disorders emerge from abnormal brain development during this stage. Obtaining a full trajectory of neurodevelopment from existing incomplete longitudinal data can enrich our limited understanding of normal early brain development and help identify neurodevelopmental disorders. Although many regression models and deep learning methods have been proposed for longitudinal prediction based on incomplete datasets, they have two major drawbacks. First, regression models suffered from the strict requirements of input and output time points, which is less useful in practical scenarios. Second, although existing deep learning methods could predict cortical development at multiple ages, they predicted missing data independently with each available scan, yielding inconsistent predictions for a target time point given multiple inputs, which ignores longitudinal dependencies and introduces ambiguity in practical applications. To this end, we emphasize temporal consistency and develop a novel, flexible framework named longitudinally consistent triplet disentanglement autoencoder to predict an individualized longitudinal cortical developmental trajectory based on each available input by encouraging the similarity among trajectories with a dynamic time-warping loss. Specifically, to achieve individualized prediction, we employ a surfaced-based autoencoder, which decomposes the encoded latent features into identity-related and age-related features with an age estimation task and identity similarity loss as supervisions. These identity-related features are further combined with age conditions in the latent space to generate longitudinal developmental trajectories with the decoder. 
Experiments on predicting longitudinal infant cortical property maps validate the superior longitudinal consistency and exactness of our results compared to baselines\u2019.", "title":"Longitudinally Consistent Individualized Prediction of Infant Cortical Morphological Development", "authors":[ "Yuan, Xinrui", "Cheng, Jiale", "Hu, Dan", "Wu, Zhengwang", "Wang, Li", "Lin, Weili", "Li, Gang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":747 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0345_paper.pdf", "bibtext":"@InProceedings{ Din_Physicalpriorsguided_MICCAI2024,\n author = { Ding, Zhengyao and Hu, Yujian and Zhang, Hongkun and Wu, Fei and Yang, Shifeng and Du, Xiaolong and Xiang, Yilang and Li, Tian and Chu, Xuesen and Huang, Zhengxing },\n title = { { Physical-priors-guided Aortic Dissection Detection using Non-Contrast-Enhanced CT images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Aortic dissection (AD) is a severe cardiovascular emergency requiring prompt and precise diagnosis for better survival chances. Given the limited use of Contrast-Enhanced Computed Tomography (CE-CT) in routine clinical screenings, this study presents a new method that enhances the diagnostic process using Non-Contrast-Enhanced CT (NCE-CT) images. In detail, we integrate biomechanical and hemodynamic physical priors into a 3D U-Net model and utilize a transformer encoder to extract superior global features, along with a cGAN-inspired discriminator for the generation of realistic CE-CT-like images. The proposed model not only innovates AD detection on NCE-CT but also provides a safer alternative for patients contraindicated for contrast agents. Comparative evaluations and ablation studies against existing methods demonstrate the superiority of our model in terms of recall, AUC, and F1 score metrics standing at 0.882, 0.855, and 0.829, respectively. Incorporating physical priors into diagnostics offers a significant, nuanced, and non-invasive advancement, seamlessly integrating medical imaging with the dynamic aspects of human physiology. 
Our code is available at https:\/\/github.com\/Yukui-1999\/PIAD.", "title":"Physical-priors-guided Aortic Dissection Detection using Non-Contrast-Enhanced CT images", "authors":[ "Ding, Zhengyao", "Hu, Yujian", "Zhang, Hongkun", "Wu, Fei", "Yang, Shifeng", "Du, Xiaolong", "Xiang, Yilang", "Li, Tian", "Chu, Xuesen", "Huang, Zhengxing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Yukui-1999\/PIAD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":748 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0877_paper.pdf", "bibtext":"@InProceedings{ Li_KARGEN_MICCAI2024,\n author = { Li, Yingshu and Wang, Zhanyu and Liu, Yunyi and Wang, Lei and Liu, Lingqiao and Zhou, Luping },\n title = { { KARGEN: Knowledge-enhanced Automated Radiology Report Generation Using Large Language Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Harnessing the robust capabilities of Large Language Models (LLMs) for narrative generation, logical reasoning, and common-sense knowledge integration, this study delves into utilizing LLMs to enhance automated radiology report generation (R2Gen). Despite the wealth of knowledge within LLMs, efficiently triggering relevant knowledge within these large models for specific tasks like R2Gen poses a critical research challenge. This paper presents KARGEN, a Knowledge-enhanced Automated radiology Report GENeration framework based on LLMs. Utilizing a frozen LLM to generate reports, the framework integrates a knowledge graph to unlock chest disease-related knowledge within the LLM to enhance the clinical utility of generated reports. This is achieved by leveraging the knowledge graph to distill disease-related features in a designed way. Since a radiology report encompasses both normal and disease-related findings, the extracted graph-enhanced disease-related features are integrated with regional image features, attending to both aspects. We explore two fusion methods to automatically prioritize and select the most relevant features. The fused features are employed by LLM to generate reports that are more sensitive to diseases and of improved quality. Our approach demonstrates promising results on the MIMIC-CXR and IU-Xray datasets. 
Our code will be available on GitHub.", "title":"KARGEN: Knowledge-enhanced Automated Radiology Report Generation Using Large Language Models", "authors":[ "Li, Yingshu", "Wang, Zhanyu", "Liu, Yunyi", "Wang, Lei", "Liu, Lingqiao", "Zhou, Luping" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":749 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0441_paper.pdf", "bibtext":"@InProceedings{ Hun_CrossSlice_MICCAI2024,\n author = { Hung, Alex Ling Yu and Zheng, Haoxin and Zhao, Kai and Pang, Kaifeng and Terzopoulos, Demetri and Sung, Kyunghyun },\n title = { { Cross-Slice Attention and Evidential Critical Loss for Uncertainty-Aware Prostate Cancer Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Current deep learning-based models typically analyze medical images in either 2D or 3D albeit disregarding volumetric information or suffering sub-optimal performance due to the anisotropic resolution of MR data. Furthermore, providing an accurate uncertainty estimation is beneficial to clinicians, as it indicates how confident a model is about its prediction. We propose a novel 2.5D cross-slice attention model that utilizes both global and local information, along with an evidential critical loss, to perform evidential deep learning for the detection in MR images of prostate cancer, one of the most common cancers and a leading cause of cancer-related death in men. We perform extensive experiments with our model on two different datasets and achieve state-of-the-art performance in prostate cancer detection along with improved epistemic uncertainty estimation. The implementation of the model is available at https:\/\/github.com\/aL3x-O-o-Hung\/GLCSA_ECLoss.", "title":"Cross-Slice Attention and Evidential Critical Loss for Uncertainty-Aware Prostate Cancer Detection", "authors":[ "Hung, Alex Ling Yu", "Zheng, Haoxin", "Zhao, Kai", "Pang, Kaifeng", "Terzopoulos, Demetri", "Sung, Kyunghyun" ], "id":"Conference", "arxiv_id":"2407.01146", "GitHub":[ "https:\/\/github.com\/aL3x-O-o-Hung\/GLCSA_ECLoss" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":750 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1149_paper.pdf", "bibtext":"@InProceedings{ Yan_Cardiovascular_MICCAI2024,\n author = { Yang, Zefan and Zhang, Jiajin and Wang, Ge and Kalra, Mannudeep K. and Yan, Pingkun },\n title = { { Cardiovascular Disease Detection from Multi-View Chest X-rays with BI-Mamba } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate prediction of Cardiovascular disease (CVD) risk in medical imaging is central to effective patient health management. 
Previous studies have demonstrated that imaging features in computed tomography (CT) can help predict CVD risk. However, CT entails notable radiation exposure, which may result in adverse health effects for patients. In contrast, chest X-ray emits significantly lower levels of radiation, offering a safer option. This rationale motivates our investigation into the feasibility of using chest X-ray for predicting CVD risk. Convolutional Neural Networks (CNNs) and Transformers are two established network architectures for computer-aided diagnosis. However, they struggle to model very high-resolution chest X-rays due to the lack of large context modeling power or quadratic time complexity. Inspired by state space sequence models (SSMs), a new class of network architectures with sequence modeling power competitive with Transformers and linear time complexity, we propose Bidirectional Image Mamba (BI-Mamba) to complement the unidirectional SSMs with opposite directional information. BI-Mamba utilizes parallel forward and backward blocks to encode long-range dependencies of multi-view chest X-rays. We conduct extensive experiments on images from 10,395 subjects in the National Lung Screening Trial (NLST). Results show that BI-Mamba outperforms ResNet-50 and ViT-S with comparable parameter size, and saves a significant amount of GPU memory during training. Moreover, BI-Mamba achieves promising performance compared with the previous state of the art in CT, unveiling the potential of chest X-ray for CVD risk prediction.", "title":"Cardiovascular Disease Detection from Multi-View Chest X-rays with BI-Mamba", "authors":[ "Yang, Zefan", "Zhang, Jiajin", "Wang, Ge", "Kalra, Mannudeep K.", "Yan, Pingkun" ], "id":"Conference", "arxiv_id":"2405.18533", "GitHub":[ "https:\/\/github.com\/RPIDIAL\/BI-Mamba" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":751 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3064_paper.pdf", "bibtext":"@InProceedings{ Zhu_Stealing_MICCAI2024,\n author = { Zhu, Meilu and Yang, Qiushi and Gao, Zhifan and Liu, Jun and Yuan, Yixuan },\n title = { { Stealing Knowledge from Pre-trained Language Models for Federated Classifier Debiasing } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Federated learning (FL) has shown great potential in medical image computing since it provides a decentralized learning paradigm that allows multiple clients to train a model collaboratively without privacy leakage. However, current studies have shown that heterogeneous data of clients causes biased classifiers of local models during training, leading to the performance degradation of a federation system. In experiments, we surprisingly found that continuously freezing local classifiers can significantly improve the performance of the baseline FL method (FedAvg) for heterogeneous data. 
This observation motivates us to pre-construct a high-quality initial classifier for local models and freeze it during local training to avoid classifier biases.\nWith this insight, we propose a novel approach named Federated Classifier deBiasing (FedCB) to solve the classifier biases problem in heterogeneous federated learning. The core idea behind FedCB is to exploit linguistic knowledge from pre-trained language models (PLMs) to construct high-quality local classifiers. Specifically, FedCB first collects the class concepts from clients and then uses a set of prompts to contextualize them, yielding language descriptions of these concepts. These descriptions are fed into a pre-trained language model to obtain their text embeddings. The generated embeddings are sent to clients to estimate the distribution of each category in the semantic space. Regarding these distributions as the local classifiers, we perform the alignment between the image representations and the corresponding semantic distribution by minimizing an upper bound of the expected cross-entropy loss. Extensive experiments on public datasets demonstrate the superior performance of FedCB compared to state-of-the-art methods. The source code is available at https:\/\/github.com\/CUHK-AIM-Group\/FedCB.", "title":"Stealing Knowledge from Pre-trained Language Models for Federated Classifier Debiasing", "authors":[ "Zhu, Meilu", "Yang, Qiushi", "Gao, Zhifan", "Liu, Jun", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/FedCB" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":752 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3580_paper.pdf", "bibtext":"@InProceedings{ Sin_CoBooM_MICCAI2024,\n author = { Singh, Azad and Mishra, Deepak },\n title = { { CoBooM: Codebook Guided Bootstrapping for Medical Image Representation Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Self-supervised learning (SSL) has emerged as a promising paradigm for medical image analysis by harnessing unannotated data. Despite their potential, the existing SSL approaches overlook the high anatomical similarity inherent in medical images. This makes it challenging for SSL methods to capture diverse semantic content in medical images consistently. This work introduces a novel and generalized solution that implicitly exploits anatomical similarities by integrating codebooks in SSL. The codebook serves as a concise and informative dictionary of visual patterns, which not only aids in capturing nuanced anatomical details but also facilitates the creation of robust and generalized feature representations. In this context, we propose CoBooM, a novel framework for self-supervised medical image learning by integrating continuous and discrete representations. The continuous component ensures the preservation of fine-grained details, while the discrete aspect facilitates coarse-grained feature extraction through the structured embedding space. 
To understand the effectiveness of CoBooM, we conduct a comprehensive evaluation of various medical datasets encompassing chest X-rays and fundus images. The experimental results reveal a significant performance gain in classification and segmentation tasks.", "title":"CoBooM: Codebook Guided Bootstrapping for Medical Image Representation Learning", "authors":[ "Singh, Azad", "Mishra, Deepak" ], "id":"Conference", "arxiv_id":"2408.04262", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":753 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1629_paper.pdf", "bibtext":"@InProceedings{ Hua_LIDIA_MICCAI2024,\n author = { Huang, Wei and Liu, Wei and Zhang, Xiaoming and Yin, Xiaoli and Han, Xu and Li, Chunli and Gao, Yuan and Shi, Yu and Lu, Le and Zhang, Ling and Zhang, Lei and Yan, Ke },\n title = { { LIDIA: Precise Liver Tumor Diagnosis on Multi-Phase Contrast-Enhanced CT via Iterative Fusion and Asymmetric Contrastive Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"The early detection and precise diagnosis of liver tumors are tasks of critical clinical value, yet they pose significant challenges due to the high heterogeneity and variability of liver tumors. In this work, a precise LIver tumor DIAgnosis network on multi-phase contrast-enhanced CT, named LIDIA, is proposed for real-world scenario. To fully utilize all available phases in contrast-enhanced CT, LIDIA first employs the iterative fusion module to aggregate variable numbers of image phases, thereby capturing the features of lesions at different phases for better tumor diagnosis. To effectively mitigate the high heterogeneity problem of liver tumors, LIDIA incorporates asymmetric contrastive learning to enhance the discriminability between different classes. To evaluate our method, we constructed a large-scale dataset comprising 1,921 patients and 8,138 lesions. LIDIA has achieved an average AUC of 93.6% across eight different types of lesions, demonstrating its effectiveness.", "title":"LIDIA: Precise Liver Tumor Diagnosis on Multi-Phase Contrast-Enhanced CT via Iterative Fusion and Asymmetric Contrastive Learning", "authors":[ "Huang, Wei", "Liu, Wei", "Zhang, Xiaoming", "Yin, Xiaoli", "Han, Xu", "Li, Chunli", "Gao, Yuan", "Shi, Yu", "Lu, Le", "Zhang, Ling", "Zhang, Lei", "Yan, Ke" ], "id":"Conference", "arxiv_id":"2407.13217", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":754 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0372_paper.pdf", "bibtext":"@InProceedings{ Che_Detecting_MICCAI2024,\n author = { Chen, Jianan and Ramanathan, Vishwesh and Xu, Tony and Martel, Anne L. 
},\n title = { { Detecting noisy labels with repeated cross-validations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Machine learning models experience deteriorated performance when trained in the presence of noisy labels. This is particularly problematic for medical tasks, such as survival prediction, which typically face high label noise complexity with few clear-cut solutions. Inspired by the large fluctuations across folds in the cross-validation performance of survival analyses, we design Monte-Carlo experiments to show that such fluctuation could be caused by label noise. We propose two novel and straightforward label noise detection algorithms that effectively identify noisy examples by pinpointing the samples that more frequently contribute to inferior cross-validation results. We first introduce Repeated Cross-Validation (ReCoV), a parameter-free label noise detection algorithm that is robust to model choice. We further develop fastReCoV, a less robust but more tractable and efficient variant of ReCoV suitable for deep learning applications. Through extensive experiments, we show that ReCoV and fastReCoV achieve state-of-the-art label noise detection performance in a wide range of modalities, models and tasks, including survival analysis, which has yet to be addressed in the literature. Our code and data are publicly available at https:\/\/github.com\/GJiananChen\/ReCoV.", "title":"Detecting noisy labels with repeated cross-validations", "authors":[ "Chen, Jianan", "Ramanathan, Vishwesh", "Xu, Tony", "Martel, Anne L." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/GJiananChen\/ReCoV" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":755 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0962_paper.pdf", "bibtext":"@InProceedings{ Yu_PatchSlide_MICCAI2024,\n author = { Yu, Jiahui and Wang, Xuna and Ma, Tianyu and Li, Xiaoxiao and Xu, Yingke },\n title = { { Patch-Slide Discriminative Joint Learning for Weakly-Supervised Whole Slide Image Representation and Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"In computational pathology, Multiple Instance Learning (MIL) is widely applied for classifying Giga-pixel whole slide images (WSIs) with only image-level labels. Due to the size and prominence of positive areas varying significantly across different WSIs, it is difficult for existing methods to learn task-specific features accurately. Additionally, subjective label noise usually affects deep learning frameworks, further hindering the mining of discriminative features. To address this problem, we propose an effective theory that optimizes patch and WSI feature extraction jointly, enhancing feature discriminability. Powered by this theory, we develop an angle-guided MIL framework called PSJA-MIL, effectively leveraging features at both levels.
We also focus on eliminating noise between instances and emphasizing feature enhancement within WSIs. We evaluate our approach on Camelyon17 and TCGA-Liver datasets, comparing it against state-of-the-art methods. The experimental results show significant improvements in accuracy and generalizability, surpassing the latest methods by more than 2%. Code will be available at: https:\/\/github.com\/sm8754\/PSJAMIL.", "title":"Patch-Slide Discriminative Joint Learning for Weakly-Supervised Whole Slide Image Representation and Classification", "authors":[ "Yu, Jiahui", "Wang, Xuna", "Ma, Tianyu", "Li, Xiaoxiao", "Xu, Yingke" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/sm8754\/PSJAMIL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":756 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2874_paper.pdf", "bibtext":"@InProceedings{ Sir_CenterlineDiameters_MICCAI2024,\n author = { Sirazitdinov, Ilyas and Dylov, Dmitry V. },\n title = { { Centerline-Diameters Data Structure for Interactive Segmentation of Tube-shaped Objects } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Interactive segmentation techniques are in high demand in medical imaging, where the user-machine interactions are to address the imperfections of a model and to speed up the manual annotation. All recently proposed interactive approaches have kept the segmentation mask at the core, an inefficient trait if complex elongated shapes, such as wires, catheters, or veins, need to be segmented. Herein, we propose a new data structure and the corresponding click encoding scheme for the interactive segmentation of such elongated objects, without the masks. Our data structure is based on the set of centerline and diameters, providing a good trade-off between the filament-free contouring and the pixel-wise accuracy of the prediction. Given a simple, intuitive, and interpretable setup, the new data structure can be readily integrated into existing interactive segmentation frameworks.", "title":"Centerline-Diameters Data Structure for Interactive Segmentation of Tube-shaped Objects", "authors":[ "Sirazitdinov, Ilyas", "Dylov, Dmitry V."
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":757 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0173_paper.pdf", "bibtext":"@InProceedings{ Yan_Advancing_MICCAI2024,\n author = { Yang, Yanwu and Chen, Hairui and Hu, Jiesi and Guo, Xutao and Ma, Ting },\n title = { { Advancing Brain Imaging Analysis Step-by-step via Progressive Self-paced Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent advancements in deep learning have shifted the development of brain imaging analysis. However, several challenges remain, such as heterogeneity, individual variations, and the contradiction between the high dimensionality and small size of brain imaging datasets. These issues complicate the learning process, preventing models from capturing intrinsic, meaningful patterns and potentially leading to suboptimal performance due to biases and overfitting. Curriculum learning (CL) presents a promising solution by organizing training examples from simple to complex, mimicking the human learning process, and potentially fostering the development of more robust and accurate models. Despite its potential, the inherent limitations posed by small initial training datasets present significant challenges, including overfitting and poor generalization. In this paper, we introduce the Progressive Self-Paced Distillation (PSPD) framework, employing an adaptive and progressive pacing and distillation mechanism. This allows for dynamic curriculum adjustments based on the states of both past and present models. The past model serves as a teacher, guiding the current model with gradually refined curriculum knowledge and helping prevent the loss of previously acquired knowledge. 
We validate PSPD\u2019s efficacy and adaptability across various convolutional neural networks using the Alzheimer\u2019s Disease Neuroimaging Initiative (ADNI) dataset, underscoring its superiority in enhancing model performance and generalization capabilities", "title":"Advancing Brain Imaging Analysis Step-by-step via Progressive Self-paced Learning", "authors":[ "Yang, Yanwu", "Chen, Hairui", "Hu, Jiesi", "Guo, Xutao", "Ma, Ting" ], "id":"Conference", "arxiv_id":"2407.16128", "GitHub":[ "https:\/\/github.com\/Hrychen7\/PSPD" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":758 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1765_paper.pdf", "bibtext":"@InProceedings{ Sha_Confidence_MICCAI2024,\n author = { Sharma, Saurabh and Kumar, Atul and Chandra, Joydeep },\n title = { { Confidence Matters: Enhancing Medical Image Classification Through Uncertainty-Driven Contrastive Self-Distillation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"The scarcity of data in medical image classification using deep learning often leads to overfitting the training data. Research indicates that self-distillation techniques, particularly those employing mean teacher ensembling, can alleviate this issue. However, directly transferring knowledge distillation (KD) from computer vision to medical image classification yields subpar results due to higher intra-class variance and class imbalance in medical images. This can cause supervised and contrastive learning-based solutions to become biased towards the majority class, resulting in misclassification. To address this, we propose UDCD, an uncertainty-driven contrastive learning-based self-distillation framework that regulates the transfer of contrastive and supervised knowledge, ensuring only relevant knowledge is transferred from the teacher to the student for fine-grained knowledge transfer. By controlling the outcome of the transferable contrastive and teacher\u2019s supervised knowledge based on confidence levels, our framework better classifies images under higher intra- and inter-relation constraints with class imbalance raised due to data scarcity, distilling only useful knowledge to the student. Extensive experiments conducted on benchmark datasets such as HAM10000 and APTOS validate the superiority of our proposed method. 
The code is available at https:\/\/github.com\/philsaurabh\/UDCD_MICCAI.", "title":"Confidence Matters: Enhancing Medical Image Classification Through Uncertainty-Driven Contrastive Self-Distillation", "authors":[ "Sharma, Saurabh", "Kumar, Atul", "Chandra, Joydeep" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/philsaurabh\/UDCD_MICCAI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":759 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3080_paper.pdf", "bibtext":"@InProceedings{ Zha_Knowledgedriven_MICCAI2024,\n author = { Zhang, Yupei and Wang, Xiaofei and Meng, Fangliangzi and Tang, Jin and Li, Chao },\n title = { { Knowledge-driven Subspace Fusion and Gradient Coordination for Multi-modal Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Most recently, molecular pathology has played a crucial role in cancer diagnosis and prognosis assessment. Deep learning-based methods have been proposed for integrating multi-modal genomic and histology data for efficient molecular pathology analysis. However, current multi-modal approaches simply treat each modality equally, ignoring the modal unique information and the complex correlation across modalities, which hinders the effective multi-modal feature representation for downstream tasks. Besides, considering the intrinsic complexity in the tumour ecosystem, where both tumour cells and tumor microenvironment (TME) contribute to the cancer status, it is challenging to utilize a single embedding space to model the mixed genomic profiles of the tumour ecosystem. To tackle these challenges, in this paper, we propose a biologically interpretative and robust multi-modal learning framework to efficiently integrate histology images and genomics data. \n Specifically, to enhance cross-modal interactions, we design a knowledge-driven subspace fusion scheme, consisting of a cross-modal deformable attention module and a gene-guided consistency strategy. Additionally, in pursuit of dynamically optimizing the subspace knowledge, we further propose a novel gradient coordination learning strategy.
Extensive experiments on two public datasets demonstrate the effectiveness of our proposed method, outperforming state-of-the-art techniques in three downstream tasks of glioma diagnosis, tumour grading, and survival analysis.", "title":"Knowledge-driven Subspace Fusion and Gradient Coordination for Multi-modal Learning", "authors":[ "Zhang, Yupei", "Wang, Xiaofei", "Meng, Fangliangzi", "Tang, Jin", "Li, Chao" ], "id":"Conference", "arxiv_id":"2406.13979", "GitHub":[ "https:\/\/github.com\/helenypzhang\/Subspace-Multimodal-Learning" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":760 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2861_paper.pdf", "bibtext":"@InProceedings{ Gam_Disentangled_MICCAI2024,\n author = { Gamgam, Gurur and Kabakcioglu, Alkan and Y\u00fcksel Dal, Demet and Acar, Burak },\n title = { { Disentangled Attention Graph Neural Network for Alzheimer\u2019s Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Neurodegenerative disorders, notably Alzheimer\u2019s Disease type Dementia (ADD), are recognized for their imprint on brain connectivity. Recent investigations employing Graph Neural Networks (GNNs) have demonstrated considerable promise in diagnosing ADD. Among the various GNN architectures, attention-based GNNs have gained prominence due to their capacity to emphasize diagnostically significant alterations in neural connectivity while suppressing irrelevant ones. Nevertheless, a notable limitation observed in attention-based GNNs pertains to the homogeneity of attention coefficients across different attention heads, suggesting a tendency for the GNN to overlook spatially localized critical alterations at the subnetwork scale (mesoscale). In response to this challenge, we propose a novel Disentangled Attention GNN (DAGNN) model\ntrained to discern attention coefficients across different heads. We show that DAGNN can generate uncorrelated latent representations across heads, potentially learning localized representations at mesoscale. 
We empirically show that these latent representations are superior to state-of-the-art GNN based representations in ADD diagnosis while providing insight to spatially localized changes in connectivity.", "title":"Disentangled Attention Graph Neural Network for Alzheimer\u2019s Disease Diagnosis", "authors":[ "Gamgam, Gurur", "Kabakcioglu, Alkan", "Y\u00fcksel Dal, Demet", "Acar, Burak" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/gururgg\/DAGNN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":761 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2773_paper.pdf", "bibtext":"@InProceedings{ Bui_VisualTextual_MICCAI2024,\n author = { Bui, Phuoc-Nguyen and Le, Duc-Tai and Choo, Hyunseung },\n title = { { Visual-Textual Matching Attention for Lesion Segmentation in Chest Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Lesion segmentation in chest images is crucial for AI-assisted diagnostic systems of pulmonary conditions. The multi-modal approach, which combines image and text description, has achieved notable performance in medical image segmentation. However, the existing methods mainly focus on improving the decoder using the text information while the encoder remains unexplored. In this study, we introduce a Multi-Modal Input UNet model, namely MMI-UNet, which utilizes visual-textual matching (VTM) features for infected areas segmentation in chest X-ray images. These VTM features, which contain visual features that are relevant to the text description, are created by a combination of self-attention and cross-attention mechanisms in a novel Image-Text Matching (ITM) module integrated into the encoder. Empirically, extensive evaluations on the QaTa-Cov19 and MosMedData+ datasets demonstrate MMI-UNet\u2019s state-of-the-art performance over both uni-modal and previous multi-modal methods. Furthermore, our method also outperforms the best uni-modal method even with 15% of the training data. These findings highlight the interpretability of our vision-language model, advancing the explainable diagnosis of pulmonary diseases and reducing the labeling cost for segmentation tasks in the medical field. 
The source code is made publicly available on GitHub.", "title":"Visual-Textual Matching Attention for Lesion Segmentation in Chest Images", "authors":[ "Bui, Phuoc-Nguyen", "Le, Duc-Tai", "Choo, Hyunseung" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/nguyenpbui\/MMI-UNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":762 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4034_paper.pdf", "bibtext":"@InProceedings{ Lee_Convolutional_MICCAI2024,\n author = { Lee, DongEon and Park, Chunsu and Lee, SeonYeong and Lee, SiYeoul and Kim, MinWoo },\n title = { { Convolutional Implicit Neural Representation of pathology whole-slide images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"This study explored the application of implicit neural representations (INRs) to enhance digital histopathological imaging. Traditional imaging methods rely on discretizing the image space into grids, managed through a pyramid file structure to accommodate the large size of whole slide images (WSIs); however, the continuous mapping capability of INRs, utilizing a multi-layer perceptron (MLP) to encode images directly from coordinates, presents a transformative approach. This method promises to streamline WSI management by eliminating the need for down-sampled versions, allowing instantaneous access to any image region at the desired magnification, thereby optimizing memory usage and reducing data storage requirements. Despite their potential, INRs face challenges in accurately representing high spatial frequency components that are pivotal in histopathology. To address this gap, we introduce a novel INR framework that integrates auxiliary convolutional neural networks (CNN) with a standard MLP model. This dual-network approach not only facilitates pixel-level analysis, but also enhances the representation of local spatial variations, which is crucial for accurately rendering the complex patterns found in WSIs. Our experimental findings indicated a substantial improvement in the fidelity of histopathological image representation, as evidenced by a 3-6 dB increase in the peak signal-to-noise ratio compared to existing methods. This advancement underscores the potential of INRs to revolutionize digital histopathology, offering a pathway towards more efficient diagnostic imaging techniques. 
Our code is available at https:\/\/pnu-amilab.github.io\/CINR\/", "title":"Convolutional Implicit Neural Representation of pathology whole-slide images", "authors":[ "Lee, DongEon", "Park, Chunsu", "Lee, SeonYeong", "Lee, SiYeoul", "Kim, MinWoo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/pnu-amilab\/CINR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":763 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1633_paper.pdf", "bibtext":"@InProceedings{ Luo_Rethinking_MICCAI2024,\n author = { Luo, Xiangde and Li, Zihan and Zhang, Shaoting and Liao, Wenjun and Wang, Guotai },\n title = { { Rethinking Abdominal Organ Segmentation (RAOS) in the clinical scenario: A robustness evaluation benchmark with challenging cases } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning has enabled great strides in abdominal multi-organ segmentation, even surpassing junior oncologists on common cases or organs. However, robustness on corner cases and complex organs remains a challenging open problem for clinical adoption. To investigate model robustness, we collected and annotated the RAOS dataset comprising 413 CT scans (~80k 2D images, ~8k 3D organ annotations) from 413 patients each with 17 (female) or 19 (male) labelled organs, manually delineated by oncologists. We grouped scans based on clinical information into 1) diagnosis\/radiotherapy (317 volumes), 2) partial excision without the whole organ missing (22 volumes), and 3) excision with the whole organ missing (74 volumes). RAOS provides a potential benchmark for evaluating model robustness including organ hallucination. It also includes some organs that can be very hard to access on public datasets like the rectum, colon, intestine, prostate and seminal vesicles. We benchmarked several state-of-the-art methods in these three clinical groups to evaluate performance and robustness. We also assessed cross-generalization between RAOS and three public datasets. 
This dataset and comprehensive analysis establish a potential baseline for future robustness research.", "title":"Rethinking Abdominal Organ Segmentation (RAOS) in the clinical scenario: A robustness evaluation benchmark with challenging cases", "authors":[ "Luo, Xiangde", "Li, Zihan", "Zhang, Shaoting", "Liao, Wenjun", "Wang, Guotai" ], "id":"Conference", "arxiv_id":"2406.13674", "GitHub":[ "https:\/\/github.com\/Luoxd1996\/RAOS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":764 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3740_paper.pdf", "bibtext":"@InProceedings{ Ars_Singlesource_MICCAI2024,\n author = { Arslan, Mazlum Ferhat and Guo, Weihong and Li, Shuo },\n title = { { Single-source Domain Generalization in Deep Learning Segmentation via Lipschitz Regularization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning methods have proven useful in medical image segmentation when deployed on independent and identically distributed (iid) data.\nHowever, their effectiveness in generalizing to previously unseen domains, where data may deviate from the iid assumption, remains an open problem.\nIn this paper, we consider the single-source domain generalization scenario where models are trained on data from a single domain and are expected to be robust under domain shifts.\nOur approach focuses on leveraging the spectral properties of images to enhance generalization performance.\nSpecifically, we argue that the high frequency regime contains domain-specific information in the form of device-specific noise and exemplify this case via data from multiple domains.\nOvercoming this challenge is non-trivial since crucial segmentation information such as edges is also encoded in this regime.\nWe propose a simple regularization method, Lipschitz regularization via frequency spectrum (LRFS), that limits the sensitivity of a model\u2019s latent representations to the high frequency components in the source domain while encouraging the sensitivity to middle frequency components.\nThis regularization approach frames the problem as approximating and controlling the Lipschitz constant for high frequency components.\nLRFS can be seamlessly integrated into existing approaches.\nOur experimental results indicate that LRFS can significantly improve the generalization performance of a variety of models.", "title":"Single-source Domain Generalization in Deep Learning Segmentation via Lipschitz Regularization", "authors":[ "Arslan, Mazlum Ferhat", "Guo, Weihong", "Li, Shuo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/kaptres\/LRFS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":765 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1240_paper.pdf", "bibtext":"@InProceedings{ Kru_Cryotrack_MICCAI2024,\n author = { Krumb, Henry J. 
and Mehtali, Jonas and Verde, Juan and Mukhopadhyay, Anirban and Essert, Caroline },\n title = { { Cryotrack: Planning and Navigation for Computer Assisted Cryoablation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Needle guidance in thermal ablation procedures is challenging due to the absence of a free line-of-sight. To date, the needle trajectory is manually planned on a pre-operative CT slice, and then the entry point is transferred with a ruler on patient and needle. Usually, the needle is inserted in multiple strokes with interleaved control CTs, increasing the number of exchanges between OR and control room and exposure of the patient to radiation. This procedure is not only tedious, but also introduces a navigation error of several centimeters if the entry point was not chosen precisely. In this paper, we present Cryotrack, a pre- and intra-operative planning assistant for needle guidance in cryoablation. Cryotrack computes possible insertion areas under the use of a pre-operative CT and its segmentation, considering obstacles (bones) and risk structures. During the intervention, cryotrack supports the clinician by supplying intraoperative guidance with a user-friendly 3D interface. Our system is evaluated in a phantom study with an experienced surgeon and two novice operators, showing that Cryotrack reduces the overall time of the intervention to a fourth while being on par with the traditional planning in terms of safety and accuracy, and being usable by novices.", "title":"Cryotrack: Planning and Navigation for Computer Assisted Cryoablation", "authors":[ "Krumb, Henry J.", "Mehtali, Jonas", "Verde, Juan", "Mukhopadhyay, Anirban", "Essert, Caroline" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Cryotrack" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":766 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1535_paper.pdf", "bibtext":"@InProceedings{ Kon_Aframework_MICCAI2024,\n author = { Konuk, Emir and Welch, Robert and Christiansen, Filip and Epstein, Elisabeth and Smith, Kevin },\n title = { { A framework for assessing joint human-AI systems based on uncertainty estimation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"We investigate the role of uncertainty quantification in aiding medical decision-making. Existing evaluation metrics fail to capture the practical utility of joint human-AI decision-making systems. To address this, we introduce a novel framework to assess such systems and use it to benchmark a diverse set of confidence and uncertainty estimation methods. 
Our results show that certainty measures enable joint human-AI systems to outperform both standalone humans and AIs, and that for a given system there exists an optimal balance in the number of cases to refer to humans, beyond which the system\u2019s performance degrades.", "title":"A framework for assessing joint human-AI systems based on uncertainty estimation", "authors":[ "Konuk, Emir", "Welch, Robert", "Christiansen, Filip", "Epstein, Elisabeth", "Smith, Kevin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":767 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0748_paper.pdf", "bibtext":"@InProceedings{ Zha_CentertoEdge_MICCAI2024,\n author = { Zhao, Jianfeng and Li, Shuo },\n title = { { Center-to-Edge Denoising Diffusion Probabilistic Models with Cross-domain Attention for Undersampled MRI Reconstruction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Integrating dual-domain (i.e. frequency domain and spatial domain) information for magnetic resonance imaging (MRI) reconstruction from undersampled measurements greatly improves imaging efficiency. However, it is still a challenging task using the denoising diffusion probabilistic models (DDPM)-based method, due to the lack of an effective fusion module to integrate dual-domain information, and there is no work exploring the effect that comes from denoising diffusion strategy on dual-domain. In this study, we propose a novel center-to-edge DDPM (C2E-DDPM) for fully-sampled MRI reconstruction from undersampled measurements (i.e. undersampled k-space and undersampled MR image) by improving the learning ability in the frequency domain and cross-domain information attention. Different from previous work, C2E-DDPM provides a C2E denoising diffusion strategy for facilitating frequency domain learning and designs an attention-guided cross-domain junction for integrating dual-domain information. Experiments indicated that our proposed C2E-DDPM achieves state-of-the-art performances in the dataset fastMRI (i.e. The scores of PSNR\/SSIM of 33.26\/88.43 for 4x acceleration and 31.67\/81.94 for 8x acceleration).", "title":"Center-to-Edge Denoising Diffusion Probabilistic Models with Cross-domain Attention for Undersampled MRI Reconstruction", "authors":[ "Zhao, Jianfeng", "Li, Shuo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":768 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3732_paper.pdf", "bibtext":"@InProceedings{ Zhe_FewShot_MICCAI2024,\n author = { Zheng, Meng and Planche, Benjamin and Gao, Zhongpai and Chen, Terrence and Radke, Richard J. 
and Wu, Ziyan },\n title = { { Few-Shot 3D Volumetric Segmentation with Multi-Surrogate Fusion } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Conventional 3D medical image segmentation methods typically require learning heavy 3D networks (e.g., 3D-UNet), as well as large amounts of in-domain data with accurate pixel\/voxel-level labels to avoid overfitting. These solutions are thus extremely time- and labor-expensive, but also may easily fail to generalize to unseen objects during training. To alleviate this issue, we present MSFSeg, a novel few-shot 3D segmentation framework with a lightweight multi-surrogate fusion (MSF). MSFSeg is able to automatically segment unseen 3D objects\/organs (during training) provided with one or a few annotated 2D slices or 3D sequence segments, via learning dense query-support organ\/lesion anatomy correlations across patient populations.\nOur proposed MSF module mines comprehensive and diversified morphology correlations between unlabeled and the few labeled slices\/sequences through multiple designated surrogates, making it able to generate accurate cross-domain 3D segmentation masks given annotated slices or sequences. We demonstrate the effectiveness of our proposed framework by showing superior performance on conventional few-shot segmentation benchmarks compared to prior art, and remarkable cross-domain cross-volume segmentation performance on proprietary 3D segmentation datasets for challenging entities, i.e. tubular structures, with only limited 2D or 3D labels.", "title":"Few-Shot 3D Volumetric Segmentation with Multi-Surrogate Fusion", "authors":[ "Zheng, Meng", "Planche, Benjamin", "Gao, Zhongpai", "Chen, Terrence", "Radke, Richard J.", "Wu, Ziyan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":769 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2707_paper.pdf", "bibtext":"@InProceedings{ Jia_Hierarchical_MICCAI2024,\n author = { Jiang, Yu and He, Zhibin and Peng, Zhihao and Yuan, Yixuan },\n title = { { Hierarchical Graph Learning with Small-World Brain Connectomes for Cognitive Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Functional magnetic resonance imaging (fMRI) is capable of assessing an individual\u2019s cognitive abilities by measuring blood oxygen level dependence. Due to the complexity of brain structure and function, exploring the relationship between cognitive ability and brain functional connectivity is extremely challenging. Recently, graph neural networks have been employed to extract functional connectivity features for predicting cognitive scores. 
Nevertheless, these methods have two main limitations: 1) They ignore the hierarchical nature of the brain: discarding fine-grained information within each brain region, and overlooking supplementary information on the functional hierarchy of the brain at multiple scales; 2) They ignore the small-world nature of the brain: current methods for generating functional connectivity produce regular networks with relatively low information transmission efficiency. To address these issues, we propose a \\textit{Hierarchical Graph Learning with Small-World Brain Connectomes} (SW-HGL) framework for cognitive prediction. This framework consists of three modules: the pyramid information extraction module (PIE), the small-world brain connectomes construction module (SW-BCC), and the hierarchical graph learning module (HGL). Specifically, PIE identifies representative vertices at both micro-scale (community level) and macro-scale (region level) through community clustering and graph pooling. SW-BCC simulates the small-world nature of the brain by rewiring regular networks and establishes functional connections at both region and community levels. HGL is a dual-branch network used to extract and fuse micro-scale and macro-scale features for cognitive score prediction. Compared to state-of-the-art methods, our SW-HGL consistently achieves outstanding performance on the HCP dataset.", "title":"Hierarchical Graph Learning with Small-World Brain Connectomes for Cognitive Prediction", "authors":[ "Jiang, Yu", "He, Zhibin", "Peng, Zhihao", "Yuan, Yixuan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CUHK-AIM-Group\/SW-HGL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":770 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1293_paper.pdf", "bibtext":"@InProceedings{ Zha_TextPolyp_MICCAI2024,\n author = { Zhao, Yiming and Zhou, Yi and Zhang, Yizhe and Wu, Ye and Zhou, Tao },\n title = { { TextPolyp: Point-supervised Polyp Segmentation with Text Cues } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Polyp segmentation in colonoscopy images is essential for preventing colorectal cancer (CRC). Existing polyp segmentation models often struggle with costly pixel-wise annotations. Conversely, datasets can be annotated quickly and affordably using weak labels like points. However, utilizing sparse annotations for model training remains challenging due to the limited information. In this study, we propose a TextPolyp approach to address this issue by leveraging only point annotations and text cues for effective weakly-supervised polyp segmentation. Specifically, we utilize the Grounding DINO algorithm and Segment Anything Model (SAM) to generate initial pseudo-labels, which are then refined with point annotations. Subsequently, we employ a SAM-based mutual learning strategy to effectively enhance segmentation results from SAM. Additionally, we propose a Discrepancy-aware Weight Scheme (DWS) to adaptively reduce the impact of unreliable predictions from SAM. Our TextPolyp model is versatile and can seamlessly integrate with various backbones and segmentation methods.
More importantly, the proposed strategies are used exclusively during training, incurring no additional computational cost during inference. Extensive experiments confirm the effectiveness of our TextPolyp approach.", "title":"TextPolyp: Point-supervised Polyp Segmentation with Text Cues", "authors":[ "Zhao, Yiming", "Zhou, Yi", "Zhang, Yizhe", "Wu, Ye", "Zhou, Tao" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/taozh2017\/TextPolyp" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":771 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2259_paper.pdf", "bibtext":"@InProceedings{ Han_MeshBrush_MICCAI2024,\n author = { Han, John J. and Acar, Ayberk and Kavoussi, Nicholas and Wu, Jie Ying },\n title = { { MeshBrush: Painting the Anatomical Mesh with Neural Stylization for Endoscopy } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Style transfer is a promising approach to close the sim-to-real gap in medical endoscopy. Rendering synthetic endoscopic videos by traversing pre-operative scans (such as MRI or CT) can generate structurally accurate simulations as well as ground truth camera poses and depth maps. Although image-to-image (I2I) translation models such as CycleGAN can imitate realistic endoscopic images from these simulations, they are unsuitable for video-to-video synthesis due to the lack of temporal consistency, resulting in artifacts between frames. We propose MeshBrush, a neural mesh stylization method to synthesize temporally consistent videos with differentiable rendering. MeshBrush uses the underlying geometry of patient imaging data while leveraging existing I2I methods. With learned per-vertex textures, the stylized mesh guarantees consistency while producing high-fidelity outputs. We demonstrate that mesh stylization is a promising approach for creating realistic simulations for downstream tasks such as training networks and preoperative planning. Although our method is tested and designed for ureteroscopy, its components are transferable to general endoscopic and laparoscopic procedures. 
The code will be made public on GitHub.", "title":"MeshBrush: Painting the Anatomical Mesh with Neural Stylization for Endoscopy", "authors":[ "Han, John J.", "Acar, Ayberk", "Kavoussi, Nicholas", "Wu, Jie Ying" ], "id":"Conference", "arxiv_id":"2404.02999", "GitHub":[ "https:\/\/github.com\/juseonghan\/MeshBrush" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":772 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0242_paper.pdf", "bibtext":"@InProceedings{ Xu_Transforming_MICCAI2024,\n author = { Xu, Huan and Wu, Jinlin and Cao, Guanglin and Chen, Zhen and Lei, Zhen and Liu, Hongbin },\n title = { { Transforming Surgical Interventions with Embodied Intelligence for Ultrasound Robotics } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Ultrasonography has revolutionized non-invasive diagnostic methodologies, significantly enhancing patient outcomes across various medical domains. Despite its advancements, integrating ultrasound technology with robotic systems for automated scans presents challenges, including limited command understanding and dynamic execution capabilities. To address these challenges, this paper introduces a novel Ultrasound Embodied Intelligence system that synergistically combines ultrasound robots with large language models (LLMs) and domain-specific knowledge augmentation, enhancing ultrasound robots\u2019 intelligence and operational efficiency. Our approach employs a dual strategy: firstly, integrating LLMs with ultrasound robots to interpret doctors\u2019 verbal instructions into precise motion planning through a comprehensive understanding of ultrasound domain knowledge, including APIs and operational manuals; secondly, incorporating a dynamic execution mechanism, allowing for real-time adjustments to scanning plans based on patient movements or procedural errors. We demonstrate the effectiveness of our system through extensive experiments, including ablation studies and comparisons across various models, showcasing significant improvements in executing medical procedures from verbal commands. Our findings suggest that the proposed system improves the efficiency and quality of ultrasound scans and paves the way for further advancements in autonomous medical scanning technologies, with the potential to transform non-invasive diagnostics and streamline medical workflows. 
The source code is available at https:\/\/github.com\/seanxuu\/EmbodiedUS.", "title":"Transforming Surgical Interventions with Embodied Intelligence for Ultrasound Robotics", "authors":[ "Xu, Huan", "Wu, Jinlin", "Cao, Guanglin", "Chen, Zhen", "Lei, Zhen", "Liu, Hongbin" ], "id":"Conference", "arxiv_id":"2406.12651", "GitHub":[ "https:\/\/github.com\/seanxuu\/EmbodiedUS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":773 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3954_paper.pdf", "bibtext":"@InProceedings{ Wan_CarDcros_MICCAI2024,\n author = { Wang, Yuli and Hsu, Wen-Chi and Shi, Victoria and Lin, Gigin and Lin, Cheng Ting and Feng, Xue and Bai, Harrison },\n title = { { Car-Dcros: A Dataset and Benchmark for Enhancing Cardiovascular Artery Segmentation through Disconnected Components Repair and Open Curve Snake } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"The segmentation of cardiovascular arteries in 3D medical images holds significant promise for assessing vascular health. Despite the progress in current methodologies, there remain significant challenges, especially in the precise segmentation of smaller vascular structures and those affected by arterial plaque, which often present as disconnected in images. Addressing these issues, we introduce an innovative refinement method that utilizes a data-driven strategy to correct the appearance of disconnected arterial structures. Initially, we create a synthetic dataset designed to mimic the appearance of disconnected cardiovascular structures. Our method then re-frames the segmentation issue as a task of detecting disconnected points, employing a neural network trained to identify points that can link the disconnected components. We further integrate an open curve active contour model, which facilitates the seamless connection of these points while ensuring smoothness. 
The effectiveness and clinical relevance of our methodology are validated through an application on an actual dataset from a medical institution.", "title":"Car-Dcros: A Dataset and Benchmark for Enhancing Cardiovascular Artery Segmentation through Disconnected Components Repair and Open Curve Snake", "authors":[ "Wang, Yuli", "Hsu, Wen-Chi", "Shi, Victoria", "Lin, Gigin", "Lin, Cheng Ting", "Feng, Xue", "Bai, Harrison" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/YuliWanghust\/CTA_repairment" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":774 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0803_paper.pdf", "bibtext":"@InProceedings{ Liu_Affinity_MICCAI2024,\n author = { Liu, Mengjun and Song, Zhiyun and Chen, Dongdong and Wang, Xin and Zhuang, Zixu and Fei, Manman and Zhang, Lichi and Wang, Qian },\n title = { { Affinity Learning Based Brain Function Representation for Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Resting-state functional magnetic resonance imaging (rs-fMRI) serves as a potent means to quantify brain functional connectivity (FC), which holds potential in diagnosing diseases. However, conventional FC measures may fall short in encapsulating the intricate functional dynamics of the brain; for instance, FC computed via Pearson correlation merely captures linear statistical dependencies among signals from different brain regions. In this study, we propose an affinity learning framework for modeling FC, leveraging a pre-training model to discern informative function representation among brain regions. Specifically, we employ randomly sampled patches and encode them to generate region embeddings, which are subsequently utilized by the proposed affinity learning module to deduce function representation between any pair of regions via an affinity encoder and a signal reconstruction decoder. Moreover, we integrate supervision from large language model (LLM) to incorporate prior brain function knowledge. We evaluate the efficacy of our framework across two datasets. The results from downstream brain disease diagnosis tasks underscore the effectiveness and generalizability of the acquired function representation. In summary, our approach furnishes a novel perspective on brain function representation in connectomics. 
Our code is available at https:\/\/github.com\/mjliu2020\/ALBFR.", "title":"Affinity Learning Based Brain Function Representation for Disease Diagnosis", "authors":[ "Liu, Mengjun", "Song, Zhiyun", "Chen, Dongdong", "Wang, Xin", "Zhuang, Zixu", "Fei, Manman", "Zhang, Lichi", "Wang, Qian" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":775 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1182_paper.pdf", "bibtext":"@InProceedings{ Xia_FedIA_MICCAI2024,\n author = { Xiang, Yangyang and Wu, Nannan and Yu, Li and Yang, Xin and Cheng, Kwang-Ting and Yan, Zengqiang },\n title = { { FedIA: Federated Medical Image Segmentation with Heterogeneous Annotation Completeness } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Federated learning has emerged as a compelling paradigm for medical image segmentation, particularly in light of increasing privacy concerns. However, most of the existing research relies on relatively stringent assumptions regarding the uniformity and completeness of annotations across clients. Contrary to this, this paper highlights a prevalent challenge in medical practice: incomplete annotations. Such annotations can introduce incorrectly labeled pixels, potentially undermining the performance of neural networks in supervised learning. To tackle this issue, we introduce a novel solution, named FedIA. Our insight is to conceptualize incomplete annotations as noisy data (i.e., low-quality data), with a focus on mitigating their adverse effects. We begin by evaluating the completeness of annotations at the client level using a designed indicator. Subsequently, we enhance the influence of clients with more comprehensive annotations and implement corrections for incomplete ones, thereby ensuring that models are trained on accurate data. Our method\u2019s effectiveness is validated through its superior performance on two extensively used medical image segmentation datasets, outperforming existing solutions. 
The code is available at https:\/\/github.com\/HUSTxyy\/FedIA.", "title":"FedIA: Federated Medical Image Segmentation with Heterogeneous Annotation Completeness", "authors":[ "Xiang, Yangyang", "Wu, Nannan", "Yu, Li", "Yang, Xin", "Cheng, Kwang-Ting", "Yan, Zengqiang" ], "id":"Conference", "arxiv_id":"2407.02280", "GitHub":[ "https:\/\/github.com\/HUSTxyy\/FedIA" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":776 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1282_paper.pdf", "bibtext":"@InProceedings{ Yan_EndoFinder_MICCAI2024,\n author = { Yang, Ruijie and Zhu, Yan and Fu, Peiyao and Zhang, Yizhe and Wang, Zhihua and Li, Quanlin and Zhou, Pinghong and Yang, Xian and Wang, Shuo },\n title = { { EndoFinder: Online Image Retrieval for Explainable Colorectal Polyp Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Determining the necessity of resecting malignant polyps during colonoscopy screen is crucial for patient outcomes, yet challenging due to the time-consuming and costly nature of histopathology examination. While deep learning-based classification models have shown promise in achieving optical biopsy with endoscopic images, they often suffer from a lack of explainability. To overcome this limitation, we introduce EndoFinder, a content-based image retrieval framework to find the \u2018digital twin\u2019 polyp in the reference database given a newly detected polyp. The clinical semantics of the new polyp can be inferred referring to the matched ones. EndoFinder pioneers a polyp-aware image encoder that is pre-trained on a large polyp dataset in a self-supervised way, merging masked image modeling with contrastive learning. This results in a generic embedding space ready for different downstream clinical tasks based on image retrieval. We validate the framework on polyp re-identification and optical biopsy tasks, with extensive experiments demonstrating that EndoFinder not only achieves explainable diagnostics but also matches the performance of supervised classification models. 
EndoFinder\u2019s reliance on image retrieval has the potential to support diverse downstream decision-making tasks during real-time colonoscopy procedures.", "title":"EndoFinder: Online Image Retrieval for Explainable Colorectal Polyp Diagnosis", "authors":[ "Yang, Ruijie", "Zhu, Yan", "Fu, Peiyao", "Zhang, Yizhe", "Wang, Zhihua", "Li, Quanlin", "Zhou, Pinghong", "Yang, Xian", "Wang, Shuo" ], "id":"Conference", "arxiv_id":"2407.11401", "GitHub":[ "https:\/\/github.com\/ku262\/EndoFinder" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":777 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0362_paper.pdf", "bibtext":"@InProceedings{ Yu_SliceConsistent_MICCAI2024,\n author = { Yu, Qinji and Wang, Yirui and Yan, Ke and Lu, Le and Shen, Na and Ye, Xianghua and Ding, Xiaowei and Jin, Dakai },\n title = { { Slice-Consistent Lymph Nodes Detection Transformer in CT Scans via Cross-slice Query Contrastive Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Lymph node (LN) assessment is an indispensable yet very challenging task in the daily clinical workload of radiology and oncology offering valuable insights for cancer staging and treatment planning. Finding scatteredly distributed, low-contrast clinically relevant LNs in 3D CT is difficult even for experienced physicians along with high inter-observer variations. Previous CNN-based lesion and LN detectors often take a 2.5D approach by using a 2D network architecture with multi-slice inputs, which utilizes the pretrained 2D model weights and shows better accuracy as compared to direct 3D detectors. However, slice-based 2.5D detectors fail to place explicit constraints on the inter-slice consistency, where a single 3D LN can be falsely predicted as two or more LN instances or multiple LNs are erroneously merged into one large LN. These will adversely affect the downstream LN metastasis diagnostic task as the 3D size information is one of the most important malignant indicators. In this work, we propose an effective and accurate 2.5D LN detection transformer that explicitly considers the inter-slice consistency within a LN. It first enhances a detection transformer by utilizing an efficient multi-scale 2.5D fusion scheme to leverage pre-trained 2D weights. Then, we introduce a novel cross-slice query contrastive learning module, which pulls the query embeddings of the same 3D LN instance closer and pushes the embeddings of adjacent similar anatomies (hard negatives) farther. 
Trained and tested on 3D CT scans of 670 patients (with 7252 labeled LN instances) of different body parts (neck, chest, and upper abdomen) and pathologies, our method significantly improves the performance of previous leading detection methods by at least 3\\% average recall at the same FP rates in both internal and external testing.", "title":"Slice-Consistent Lymph Nodes Detection Transformer in CT Scans via Cross-slice Query Contrastive Learning", "authors":[ "Yu, Qinji", "Wang, Yirui", "Yan, Ke", "Lu, Le", "Shen, Na", "Ye, Xianghua", "Ding, Xiaowei", "Jin, Dakai" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/CSCYQJ\/MICCAI24-Slice-Consistent-Lymph-Nodes-DETR" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":778 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1213_paper.pdf", "bibtext":"@InProceedings{ Xia_ANew_MICCAI2024,\n author = { Xia, Wenyao and Fan, Victoria and Peters, Terry and Chen, Elvis C. S. },\n title = { { A New Benchmark In Vivo Paired Dataset for Laparoscopic Image De-smoking } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"The single greatest obstacle in developing effective algorithms for removing surgical smoke in laparoscopic surgery is the lack of a paired dataset featuring real smoky and smoke-free surgical scenes. Consequently, existing de-smoking algorithms are developed and evaluated based on atmospheric scattering models, synthetic data, and non-reference image enhancement metrics, which do not adequately capture the complexity and essence of in vivo surgical scenes with smoke. To bridge this gap, we propose creating a paired dataset by identifying video sequences with relatively stationary scenes from existing laparoscopic surgical recordings where smoke emerges. In addition, we developed an approach to facilitate robust motion tracking through smoke to compensate for patients\u2019 involuntary movements. As a result, we obtained 21 video sequences from 63 laparoscopic prostatectomy procedure recordings, comprising 961 pairs of smoky images and their corresponding smoke-free ground truth. Using this unique dataset, we compared a representative set of current de-smoking methods, confirming their efficacy and revealing their limitations, thereby offering insights for future directions. The dataset is available at https:\/\/github.com\/wxia43\/DesmokeData.", "title":"A New Benchmark In Vivo Paired Dataset for Laparoscopic Image De-smoking", "authors":[ "Xia, Wenyao", "Fan, Victoria", "Peters, Terry", "Chen, Elvis C. S." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/wxia43\/DesmokeData" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":779 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3303_paper.pdf", "bibtext":"@InProceedings{ Rei_DataDriven_MICCAI2024,\n author = { Reithmeir, Anna and Felsner, Lina and Braren, Rickmer F. 
and Schnabel, Julia A. and Zimmer, Veronika A. },\n title = { { Data-Driven Tissue- and Subject-Specific Elastic Regularization for Medical Image Registration } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Physics-inspired regularization is desired for intra-patient\nimage registration since it can effectively capture the biomechanical characteristics of anatomical structures. However, a major challenge lies in the reliance on physical parameters: Parameter estimations vary widely across the literature, and the physical properties themselves are inherently subject-specific. In this work, we introduce a novel data-driven method that leverages hypernetworks to learn the tissue-dependent elasticity parameters of an elastic regularizer. Notably, our approach facilitates the estimation of patient-specific parameters without the need to retrain the network. We evaluate our method on three publicly available 2D and 3D lung CT and cardiac MR datasets. We find that with our proposed subject-specific tissue-dependent regularization, a higher registration quality is achieved across all datasets compared to using a global regularizer. The code is available at https:\/\/github.com\/compai-lab\/2024-miccai-reithmeir.", "title":"Data-Driven Tissue- and Subject-Specific Elastic Regularization for Medical Image Registration", "authors":[ "Reithmeir, Anna", "Felsner, Lina", "Braren, Rickmer F.", "Schnabel, Julia A.", "Zimmer, Veronika A." ], "id":"Conference", "arxiv_id":"2407.04355", "GitHub":[ "https:\/\/github.com\/compai-lab\/2024-miccai-reithmeir" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":780 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1153_paper.pdf", "bibtext":"@InProceedings{ Sim_MultiModal_MICCAI2024,\n author = { Sim, Jaeyoon and Lee, Minjae and Wu, Guorong and Kim, Won Hwa },\n title = { { Multi-Modal Graph Neural Network with Transformer-Guided Adaptive Diffusion for Preclinical Alzheimer Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"The graphical representation of the brain offers critical insights into diagnosing and prognosing neurodegenerative disease via relationships between regions of interest (ROIs). Despite recent emergence of various Graph Neural Networks (GNNs) to effectively capture the relational information, there remain inherent limitations in interpreting the brain networks. Specifically, convolutional approaches ineffectively aggregate information from distant neighborhoods, while attention-based methods exhibit deficiencies in capturing node-centric information, particularly in retaining critical characteristics from pivotal nodes. These shortcomings reveal challenges for identifying disease-specific variation from diverse features from different modalities. 
In this regard, we propose an integrated framework guiding diffusion process at each node by a downstream transformer where both short- and long-range properties of graphs are aggregated via diffusion-kernel and multi-head attention respectively. We demonstrate the superiority of our model by improving performance of pre-clinical Alzheimer\u2019s disease (AD) classification with various modalities. Also, our model adeptly identifies key ROIs that are closely associated with the preclinical stages of AD, marking a significant potential for early diagnosis and prevision of the disease.", "title":"Multi-Modal Graph Neural Network with Transformer-Guided Adaptive Diffusion for Preclinical Alzheimer Classification", "authors":[ "Sim, Jaeyoon", "Lee, Minjae", "Wu, Guorong", "Kim, Won Hwa" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":781 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2364_paper.pdf", "bibtext":"@InProceedings{ Hou_AClinicaloriented_MICCAI2024,\n author = { Hou, Qingshan and Cheng, Shuai and Cao, Peng and Yang, Jinzhu and Liu, Xiaoli and Tham, Yih Chung and Zaiane, Osmar R. },\n title = { { A Clinical-oriented Multi-level Contrastive Learning Method for Disease Diagnosis in Low-quality Medical Images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Representation learning offers a conduit to elucidate distinctive features within the latent space and interpret the deep models. However, the randomness of lesion distribution and the complexity of low-quality factors in medical images pose great challenges for models to extract key lesion features. Disease diagnosis methods guided by contrastive learning (CL) have shown significant advantages in lesion feature representation. Nevertheless, the effectiveness of CL is highly dependent on the quality of the positive and negative sample pairs. In this work, we propose a clinical-oriented multi-level CL framework that aims to enhance the model\u2019s capacity to extract lesion features and discriminate between lesion and low-quality factors, thereby enabling more accurate disease diagnosis from low-quality medical images. Specifically, we first construct multi-level positive and negative pairs to enhance the model\u2019s comprehensive recognition capability of lesion features by integrating information from different levels and qualities of medical images. Moreover, to improve the quality of the learned lesion embeddings, we introduce a dynamic hard sample mining method based on self-paced learning. The proposed CL framework is validated on two public medical image datasets, EyeQ and Chest X-ray, demonstrating superior performance compared to other state-of-the-art disease diagnostic methods.", "title":"A Clinical-oriented Multi-level Contrastive Learning Method for Disease Diagnosis in Low-quality Medical Images", "authors":[ "Hou, Qingshan", "Cheng, Shuai", "Cao, Peng", "Yang, Jinzhu", "Liu, Xiaoli", "Tham, Yih Chung", "Zaiane, Osmar R." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":782 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1324_paper.pdf", "bibtext":"@InProceedings{ Zha_Exploiting_MICCAI2024,\n author = { Zhao, Xiangyu and Ouyang, Xi and Zhang, Lichi and Xue, Zhong and Shen, Dinggang },\n title = { { Exploiting Latent Classes for Medical Image Segmentation from Partially Labeled Datasets } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Notable progress has been made in medical image segmentation models due to the availability of massive training data. Nevertheless, a majority of open-source datasets are only partially labeled, and not all expected organs or tumors are annotated in these images. While previous attempts have been made to only learn segmentation from labeled regions of interest (ROIs), they do not consider the latent classes, i.e., existing but unlabeled ROIs, in the images during the training stage. Moreover, since these methods rely exclusively on labeled ROIs and those unlabeled regions are viewed as background, they need large-scale and diverse datasets to achieve a variety of ROI segmentation. In this paper, we propose a framework that utilizes latent classes for segmentation from partially labeled datasets, aiming to improve segmentation performance, especially for ROIs with only a small number of annotations. Specifically, we first introduce an ROI-aware network to detect the presence of unlabeled ROIs in images and form the latent classes, which are utilized to guide the segmentation learning. Additionally, ROIs with ambiguous existence are constrained by the consistency loss between the predictions of the student and the teacher networks. By regularizing ROIs with different certainty levels under different scenarios, our method can significantly improve the robustness and reliance of segmentation on large-scale datasets. Experimental results on a public benchmark for partially labeled segmentation demonstrate that our proposed method surpasses previous attempts and has great potential to form a large-scale foundation segmentation model.", "title":"Exploiting Latent Classes for Medical Image Segmentation from Partially Labeled Datasets", "authors":[ "Zhao, Xiangyu", "Ouyang, Xi", "Zhang, Lichi", "Xue, Zhong", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":783 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0689_paper.pdf", "bibtext":"@InProceedings{ Wu_Evaluating_MICCAI2024,\n author = { Wu, Jiaqi and Peng, Wei and Li, Binxu and Zhang, Yu and Pohl, Kilian M. 
},\n title = { { Evaluating the Quality of Brain MRI Generators } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning models generating structural brain MRIs have the potential to significantly accelerate discovery of neuroscience studies. However, their use has been limited in part by the way their quality is evaluated. Most evaluations of generative models focus on metrics originally designed for natural images (such as structural similarity index and Fréchet inception distance). As we show in a comparison of 6 state-of-the-art generative models trained and tested on over 3000 MRIs, these metrics are sensitive to the experimental setup and inadequately assess how well brain MRIs capture macrostructural properties of brain regions (a.k.a., anatomical plausibility). This shortcoming of the metrics results in inconclusive findings even when qualitative differences between the outputs of models are evident. We therefore propose a framework for evaluating models generating brain MRIs, which requires uniform processing of the real MRIs, standardizing the implementation of the models, and automatically segmenting the MRIs generated by the models. The segmentations are used for quantifying the plausibility of anatomy displayed in the MRIs. To ensure meaningful quantification, it is crucial that the segmentations are highly reliable. Our framework rigorously checks this reliability, a step often overlooked by prior work. Only 3 of the 6 generative models produced MRIs, of which at least 95$\\%$ had highly reliable segmentations. More importantly, the assessment of each model by our framework is in line with qualitative assessments, reinforcing the validity of our approach. The code of this framework is available via\n\\url{https:\/\/github.com\/jiaqiw01\/MRIAnatEval.git}.", "title":"Evaluating the Quality of Brain MRI Generators", "authors":[ "Wu, Jiaqi", "Peng, Wei", "Li, Binxu", "Zhang, Yu", "Pohl, Kilian M." ], "id":"Conference", "arxiv_id":"2409.08463", "GitHub":[ "https:\/\/github.com\/jiaqiw01\/MRIAnatEval.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":784 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4136_paper.pdf", "bibtext":"@InProceedings{ Cai_Survival_MICCAI2024,\n author = { Cai, Shangyan and Huang, Weitian and Yi, Weiting and Zhang, Bin and Liao, Yi and Wang, Qiu and Cai, Hongmin and Chen, Luonan and Su, Weifeng },\n title = { { Survival analysis of histopathological image based on a pretrained hypergraph model of spatial transcriptomics data } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Survival analysis is critical for clinical decision-making and prognosis in breast cancer treatment. Recent multimodal approaches leverage histopathology images and bulk RNA-seq to improve survival prediction performance, but these approaches fail to explore spatial distribution at the cellular level.
In this work, we present a multimodal hypergraph neural network for survival analysis (MHNN-surv) that introduces a pre-trained model for spatial transcriptomic prediction. The method is characterized by making full use of histopathological images to reveal both morphological and genetic information, thus improving the interpretation of heterogeneity. Specifically, MHNN-surv first slices Whole-Slide Imaging (WSI) into patch images, followed by extracting image features and predicting spatial transcriptomics, respectively. Subsequently, an image-based hypergraph is constructed based on three-dimensional nearest-neighbor relationships, while a gene-based hypergraph is formed based on gene expression similarity. By fusing the dual hypergraphs, MHNN-surv performs an in-depth survival analysis on breast cancer using the Cox proportional hazards model. The experimental results demonstrate that MHNN-surv outperforms the state-of-the-art multimodal models in survival analysis.", "title":"Survival analysis of histopathological image based on a pretrained hypergraph model of spatial transcriptomics data", "authors":[ "Cai, Shangyan", "Huang, Weitian", "Yi, Weiting", "Zhang, Bin", "Liao, Yi", "Wang, Qiu", "Cai, Hongmin", "Chen, Luonan", "Su, Weifeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":785 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0918_paper.pdf", "bibtext":"@InProceedings{ Qi_Cardiac_MICCAI2024,\n author = { Qi, Ronghui and Li, Xiaohu and Xu, Lei and Zhang, Jie and Zhang, Yanping and Xu, Chenchu },\n title = { { Cardiac Physiology Knowledge-driven Diffusion Model for Contrast-free Synthesis Myocardial Infarction Enhancement } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Contrast-free AI myocardial infarction enhancement (MIE) synthesis technology has a significant impact on clinics due to its ability to eliminate contrast agents (CAs) administration in the current MI diagnosis. In this paper, we propose a novel cardiac physiology knowledge-driven diffusion model (CPKDM) that, for the first time, integrates cardiac physiology knowledge into cardiac MR data to guide the synthesis of high-quality MIE, thereby enhancing the generalization performance of MIE synthesis. This combination helps the model understand the principles behind the data mapping between non-enhanced image inputs and enhanced image outputs, informing the model on how and why to synthesize MIE. CPKDM leverages cardiac mechanics knowledge and MR imaging atlas knowledge to respectively guide the learning of kinematic features in CINE sequences and morphological features in T1 sequences. Moreover, CPKDM proposes a kinematics-morphology diffusion integration model to progressively fuse kinematic and morphological features for precise MIE synthesis. Evaluated on 195 patients including chronic MI and normal controls, CPKDM significantly improves performance (SSIM by at least 4%) when compared with the five most recent state-of-the-art methods.
These results demonstrate that our CPKDM exhibits superiority and offers a promising alternative for clinical diagnostics.", "title":"Cardiac Physiology Knowledge-driven Diffusion Model for Contrast-free Synthesis Myocardial Infarction Enhancement", "authors":[ "Qi, Ronghui", "Li, Xiaohu", "Xu, Lei", "Zhang, Jie", "Zhang, Yanping", "Xu, Chenchu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":786 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0368_paper.pdf", "bibtext":"@InProceedings{ Gal_Federated_MICCAI2024,\n author = { Galati, Francesco and Cortese, Rosa and Prados, Ferran and Lorenzi, Marco and Zuluaga, Maria A. },\n title = { { Federated Multi-Centric Image Segmentation with Uneven Label Distribution } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"While federated learning is the state-of-the-art methodology for collaborative learning, its adoption for training segmentation models often relies on the assumption of uniform label distributions across participants, and is generally sensitive to the large variability of multi-centric imaging data. To overcome these issues, we propose a novel federated image segmentation approach adapted to complex non-iid setting typical of real-life conditions. We assume that labeled dataset is not available to all clients, and that clients data exhibit differences in distribution due to three factors: different scanners, imaging modalities and imaged organs. Our proposed framework collaboratively builds a multimodal data factory that embeds a shared, disentangled latent representation across participants. In a second asynchronous stage, this setup enables local domain adaptation without exchanging raw data or annotations, facilitating target segmentation. We evaluate our method across three distinct scenarios, including multi-scanner cardiac magnetic resonance segmentation, multi-modality skull stripping, and multi-organ vascular segmentation. The results obtained demonstrate the quality and robustness of our approach as compared to the state-of-the-art methods.", "title":"Federated Multi-Centric Image Segmentation with Uneven Label Distribution", "authors":[ "Galati, Francesco", "Cortese, Rosa", "Prados, Ferran", "Lorenzi, Marco", "Zuluaga, Maria A." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/i-vesseg\/RobustMedSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":787 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0103_paper.pdf", "bibtext":"@InProceedings{ Wen_Learning_MICCAI2024,\n author = { Weng, Weihao and Zhu, Xin },\n title = { { Learning Representations by Maximizing Mutual Information Across Views for Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"We propose a method that leverages multiple identical network structures to generate and process diverse augmented views of the same medical image sample. By employing contrastive learning, we maximize mutual information among features extracted from different views, ensuring the networks learn robust and high-level semantic representations. Results from testing on four public and one private endoscopic surgical tool segmentation datasets indicate that the proposed method outperformed state-of-the-art semi-supervised and fully supervised segmentation methods. After trained by 5% labeled training data, the proposed method achieved an improvement of 11.5%, 8.4%, 6.5%, and 5.8% on RoboTool, Kvasir-instrument, ART-NET, and FEES, respectively. Ablation studies were also performed to measure the effectiveness of each proposed module. Code is available at \\href{https:\/\/github.com\/on1kou95\/Mutual-Exemplar}{Mutual-Exemplar}.", "title":"Learning Representations by Maximizing Mutual Information Across Views for Medical Image Segmentation", "authors":[ "Weng, Weihao", "Zhu, Xin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/on1kou95\/Mutual-Exemplar" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":788 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0745_paper.pdf", "bibtext":"@InProceedings{ Li_CausCLIP_MICCAI2024,\n author = { Li, Yiran and Cui, Xiaoxiao and Cao, Yankun and Zhang, Yuezhong and Wang, Huihui and Cui, Lizhen and Liu, Zhi and Li, Shuo },\n title = { { CausCLIP: Causality-Adapting Visual Scoring of Visual Language Models for Few-Shot Learning in Portable Echocardiography Quality Assessment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"How do we transfer Vision Language Models (VLMs), pre-trained in the source domain of conventional echocardiography (Echo), to the target domain of few-shot portable Echo (fine-tuning)? Learning image causality is crucial for few-shot learning in portable echocardiography quality assessment (PEQA), due to the domain-invariant causal and topological consistency. 
However, the lack of significant domain shifts and well-labeled data in PEQA presents challenges in obtaining reliable measurements of image causality. We investigate the challenging problem of this task, i.e., learning a consistent representation of domain-invariant causal semantic features. We propose a novel VLM-based PEQA network, Causality-Adapting Visual Scoring CLIP (CausCLIP), embedding causal disposition to measure image causality for domain-invariant representation. Specifically, Causal-Aware Visual Adapter (CVA) identifies hidden asymmetric causal relationships and learns interpretable domain-invariant causal semantic consistency, thereby improving adaptability. Visual-Consistency Contrastive Learning (VCL) focuses on the most discriminative regions by registering visual-causal similarity, enhancing discriminability. Multi-granular Image-Text Adaptive Constraints (MAC) adaptively integrate task-specific semantic multi-granular information, enhancing robustness in multi-task learning. Experimental results show that CausCLIP outperforms state-of-the-art methods, achieving absolute improvements of 4.1%, 9.5%, and 8.5% in view category, quality score, and distortion metrics, respectively.", "title":"CausCLIP: Causality-Adapting Visual Scoring of Visual Language Models for Few-Shot Learning in Portable Echocardiography Quality Assessment", "authors":[ "Li, Yiran", "Cui, Xiaoxiao", "Cao, Yankun", "Zhang, Yuezhong", "Wang, Huihui", "Cui, Lizhen", "Liu, Zhi", "Li, Shuo" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":789 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3822_paper.pdf", "bibtext":"@InProceedings{ Hua_Hard_MICCAI2024,\n author = { Huang, Wentao and Hu, Xiaoling and Abousamra, Shahira and Prasanna, Prateek and Chen, Chao },\n title = { { Hard Negative Sample Mining for Whole Slide Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Weakly supervised whole slide image (WSI) classification is challenging due to the lack of patch-level labels and high computational costs. State-of-the-art methods use self-supervised patch-wise feature representations for multiple instance learning (MIL). Recently, methods have been proposed to fine-tune the feature representation on the downstream task using pseudo labeling, but mostly focusing on selecting high-quality positive patches. In this paper, we propose to mine hard negative samples during fine-tuning. This allows us to obtain better feature representations and reduce the training cost. Furthermore, we propose a novel patch-wise ranking loss in MIL to better exploit these hard negative samples. Experiments on two public datasets demonstrate the efficacy of these proposed ideas.
Our codes are available at https:\/\/github.com\/winston52\/HNM-WSI.", "title":"Hard Negative Sample Mining for Whole Slide Image Classification", "authors":[ "Huang, Wentao", "Hu, Xiaoling", "Abousamra, Shahira", "Prasanna, Prateek", "Chen, Chao" ], "id":"Conference", "arxiv_id":"2410.02212", "GitHub":[ "https:\/\/github.com\/winston52\/HNM-WSI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":790 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0811_paper.pdf", "bibtext":"@InProceedings{ Yan_NeuroLink_MICCAI2024,\n author = { Yan, Haiyang and Zhai, Hao and Guo, Jinyue and Li, Linlin and Han, Hua },\n title = { { NeuroLink: Bridging Weak Signals in Neuronal Imaging with Morphology Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Reconstructing neurons from large-scale optical microscope images is a challenging task due to the complexity of neuronal structures and extremely weak signals in certain regions. Traditional segmentation models, built on vanilla convolutions and voxel-wise losses, struggle to model long-range relationships in sparse volumetric data. As a result, weak signals in the feature space get mixed with noise, leading to interruptions in segmentation and premature termination in neuron tracing results. To address this issue, we propose NeuroLink to add continuity constraints to the network and implicitly model neuronal morphology by utilizing multi-task learning methods. Specifically, we introduce the Dynamic Snake Convolution to extract more effective features for the sparse tubular structure of neurons and propose an easily implementable morphology-based loss function to penalize discontinuous predictions. In addition, we guide the network to leverage the morphological information of the neuron for predicting direction and distance transformation maps of neurons. Our method achieved higher recall and precision on the low-contrast Zebrafish dataset and the publicly available BigNeuron dataset. Our code is available at https:\/\/github.com\/Qingjia0226\/NeuroLink.", "title":"NeuroLink: Bridging Weak Signals in Neuronal Imaging with Morphology Learning", "authors":[ "Yan, Haiyang", "Zhai, Hao", "Guo, Jinyue", "Li, Linlin", "Han, Hua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Qingjia0226\/NeuroLink" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":791 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1874_paper.pdf", "bibtext":"@InProceedings{ Li_Exploring_MICCAI2024,\n author = { Li, Lanting and Zhang, Liuzeng and Cao, Peng and Yang, Jinzhu and Wang, Fei and Zaiane, Osmar R.
},\n title = { { Exploring Spatio-Temporal Interpretable Dynamic Brain Function with Transformer for Brain Disorder Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"The dynamic variation in the spatio-temporal organizational patterns of brain functional modules (BFMs) associated with brain disorders remains unclear. To solve this issue, we propose an end-to-end transformer-based framework for sufficiently learning the spatio-temporal characteristics of BFMs and exploring the interpretable variation related to brain disorders. Specifically, the proposed model incorporates a supervisory guidance spatio-temporal clustering strategy for automatically identifying the BFMs with the dynamic temporal-varying weights and a multi-channel self-attention mechanism with topology-aware projection for sufficiently exploring the temporal variation and spatio-temporal representation. The experimental results on the diagnosis of Major Depressive Disorder (MDD) and Bipolar Disorder (BD) indicate that our model achieves state-of-the-art performance. Moreover, our model is capable of identifying the spatio-temporal patterns of brain activity and providing evidence associated with brain disorders. Our code is available at https:\/\/github.com\/llt1836\/BISTformer.", "title":"Exploring Spatio-Temporal Interpretable Dynamic Brain Function with Transformer for Brain Disorder Diagnosis", "authors":[ "Li, Lanting", "Zhang, Liuzeng", "Cao, Peng", "Yang, Jinzhu", "Wang, Fei", "Zaiane, Osmar R." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/llt1836\/BISTformer" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":792 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2286_paper.pdf", "bibtext":"@InProceedings{ Wan_Groupwise_MICCAI2024,\n author = { Wang, Fanwen and Luo, Yihao and Wen, Ke and Huang, Jiahao and Ferreira, Pedro F. and Luo, Yaqing and Wu, Yinzhe and Munoz, Camila and Pennell, Dudley J. and Scott, Andrew D. and Nielles-Vallespin, Sonia and Yang, Guang },\n title = { { Groupwise Deformable Registration of Diffusion Tensor Cardiovascular Magnetic Resonance: Disentangling Diffusion Contrast, Respiratory and Cardiac Motions } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Diffusion tensor based cardiovascular magnetic resonance (DT-CMR) offers a non-invasive method to visualize the myocardial microstructure. With the assumption that the heart is stationary, frames are acquired with multiple repetitions for different diffusion encoding directions. However, motion from poor breath-holding and imprecise cardiac triggering complicates DT-CMR analysis, further challenged by its inherently low SNR, varied contrasts, and diffusion-induced textures. Our solution is a novel framework employing groupwise registration with an implicit template to isolate respiratory and cardiac motions, while a tensor-embedded branch preserves diffusion contrast textures. 
We\u2019ve devised a loss refinement tailored for non-linear least squares fitting and low SNR conditions. Additionally, we introduce new physics-based and clinical metrics for performance evaluation. Access code and supplementary materials at: https:\/\/github.com\/ayanglab\/DTCMR-Reg", "title":"Groupwise Deformable Registration of Diffusion Tensor Cardiovascular Magnetic Resonance: Disentangling Diffusion Contrast, Respiratory and Cardiac Motions", "authors":[ "Wang, Fanwen", "Luo, Yihao", "Wen, Ke", "Huang, Jiahao", "Ferreira, Pedro F.", "Luo, Yaqing", "Wu, Yinzhe", "Munoz, Camila", "Pennell, Dudley J.", "Scott, Andrew D.", "Nielles-Vallespin, Sonia", "Yang, Guang" ], "id":"Conference", "arxiv_id":"2406.13788", "GitHub":[ "https:\/\/github.com\/ayanglab\/DTCMR-Reg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":793 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1928_paper.pdf", "bibtext":"@InProceedings{ Nav_Ensembled_MICCAI2024,\n author = { Naval Marimont, Sergio and Siomos, Vasilis and Baugh, Matthew and Tzelepis, Christos and Kainz, Bernhard and Tarroni, Giacomo },\n title = { { Ensembled Cold-Diffusion Restorations for Unsupervised Anomaly Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15011 },\n month = {October},\n pages = { pending },\n }", "abstract":"Unsupervised Anomaly Detection (UAD) methods aim to identify anomalies in test samples comparing them with a normative distribution learned from a dataset known to be anomaly-free. Approaches based on generative models offer interpretability by generating anomaly-free versions of test images, but are typically unable to identify subtle anomalies. Alternatively, approaches using feature modelling or self-supervised methods, such as the ones relying on synthetically generated anomalies, do not provide out-of-the-box interpretability. In this work, we present a novel method that combines the strengths of both strategies: a generative cold-diffusion pipeline (i.e., a diffusion-like pipeline which uses corruptions not based on noise) that is trained with the objective of turning synthetically-corrupted images back to their normal, original appearance. To support our pipeline we introduce a novel synthetic anomaly generation procedure, called DAG, and a novel anomaly score which ensembles restorations conditioned with different degrees of abnormality. 
Our method surpasses the prior state of the art for unsupervised anomaly detection in three different Brain MRI datasets.", "title":"Ensembled Cold-Diffusion Restorations for Unsupervised Anomaly Detection", "authors":[ "Naval Marimont, Sergio", "Siomos, Vasilis", "Baugh, Matthew", "Tzelepis, Christos", "Kainz, Bernhard", "Tarroni, Giacomo" ], "id":"Conference", "arxiv_id":"2407.06635", "GitHub":[ "https:\/\/github.com\/snavalm\/disyre" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":794 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1797_paper.pdf", "bibtext":"@InProceedings{ Wu_Gazedirected_MICCAI2024,\n author = { Wu, Shaoxuan and Zhang, Xiao and Wang, Bin and Jin, Zhuo and Li, Hansheng and Feng, Jun },\n title = { { Gaze-directed Vision GNN for Mitigating Shortcut Learning in Medical Image } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep neural networks have demonstrated remarkable performance in medical image analysis. However, their susceptibility to spurious correlations due to shortcut learning raises concerns about network interpretability and reliability. Furthermore, shortcut learning is exacerbated in medical contexts where disease indicators are often subtle and sparse. In this paper, we propose a novel gaze-directed Vision GNN (called GD-ViG) to leverage the visual patterns of radiologists from gaze as expert knowledge, directing the network toward disease-relevant regions, and thereby mitigating shortcut learning. GD-ViG consists of a gaze map generator (GMG) and a gaze-directed classifier (GDC). Combining the global modelling ability of GNNs with the locality of CNNs, GMG generates the gaze map based on radiologists\u2019 visual patterns. Notably, it eliminates the need for real gaze data during inference, enhancing the network\u2019s practical applicability. Utilizing gaze as the expert knowledge, the GDC directs the construction of graph structures by incorporating both feature distances and gaze distances, enabling the network to focus on disease-relevant foregrounds, thereby avoiding shortcut learning and improving the network\u2019s interpretability. The experiments on two public medical image datasets demonstrate that GD-ViG outperforms the state-of-the-art methods, and effectively mitigates shortcut learning.
Our code is available at https:\/\/github.com\/SX-SS\/GD-ViG.", "title":"Gaze-directed Vision GNN for Mitigating Shortcut Learning in Medical Image", "authors":[ "Wu, Shaoxuan", "Zhang, Xiao", "Wang, Bin", "Jin, Zhuo", "Li, Hansheng", "Feng, Jun" ], "id":"Conference", "arxiv_id":"2406.14050", "GitHub":[ "https:\/\/github.com\/SX-SS\/GD-ViG" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":795 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0889_paper.pdf", "bibtext":"@InProceedings{ Su_Design_MICCAI2024,\n author = { Su, Tongkun and Li, Jun and Zhang, Xi and Jin, Haibo and Chen, Hao and Wang, Qiong and Lv, Faqin and Zhao, Baoliang and Hu, Ying },\n title = { { Design as Desired: Utilizing Visual Question Answering for Multimodal Pre-training } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Multimodal pre-training demonstrates its potential in the medical domain, which learns medical visual representations from paired medical reports. However, many pre-training tasks require extra annotations from clinicians, and most of them fail to explicitly guide the model to learn the desired features of different pathologies. In this paper, we utilize Visual Question Answering (VQA) for multimodal pre-training to guide the framework focusing on targeted pathological features. We leverage descriptions in medical reports to design multi-granular question-answer pairs associated with different diseases, which assist the framework in pre-training without requiring extra annotations from experts. We also propose a novel pre-training framework with a quasi-textual feature transformer, a module designed to transform visual features into a quasi-textual space closer to the textual domain via a contrastive learning strategy. This narrows the vision-language gap and facilitates modality alignment. Our framework is applied to four downstream tasks: report generation, classification, segmentation, and detection across five datasets. Extensive experiments demonstrate the superiority of our framework compared to other state-of-the-art methods. Our code is available at https:\/\/github.com\/MoramiSu\/QFT.", "title":"Design as Desired: Utilizing Visual Question Answering for Multimodal Pre-training", "authors":[ "Su, Tongkun", "Li, Jun", "Zhang, Xi", "Jin, Haibo", "Chen, Hao", "Wang, Qiong", "Lv, Faqin", "Zhao, Baoliang", "Hu, Ying" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/MoramiSu\/QFT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":796 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2068_paper.pdf", "bibtext":"@InProceedings{ Che_Vestibular_MICCAI2024,\n author = { Chen, Yunjie and Wolterink, Jelmer M. and Neve, Olaf M. and Romeijn, Stephan R. and Verbist, Berit M. and Hensen, Erik F. 
and Tao, Qian and Staring, Marius },\n title = { { Vestibular schwannoma growth prediction from longitudinal MRI by time-conditioned neural fields } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Vestibular schwannomas (VS) are benign tumors that are generally managed by active surveillance with MRI examination. To further assist clinical decision-making and avoid overtreatment, an accurate prediction of tumor growth based on longitudinal imaging is highly desirable. In this paper, we introduce DeepGrowth, a deep learning method that incorporates neural fields and recurrent neural networks for prospective tumor growth prediction. In the proposed method, each tumor is represented as a signed distance function (SDF) conditioned on a low-dimensional latent code. Unlike previous studies, we predict the latent codes of the future tumor and generate the tumor shapes from it using a multilayer perceptron (MLP). To deal with irregular time intervals, we introduce a time-conditioned recurrent module based on a ConvLSTM and a novel temporal encoding strategy, which enables the proposed model to output varying tumor shapes over time. The experiments on an in-house longitudinal VS dataset showed that the proposed model significantly improved the performance (>=1.6% Dice score and >=0.20 mm 95% Hausdorff distance), in particular for top 20% tumors that grow or shrink the most (>=4.6% Dice score and >= 0.73 mm 95% Hausdorff distance). Our code is available at https:\/\/github.com\/cyjdswx\/DeepGrowth.", "title":"Vestibular schwannoma growth prediction from longitudinal MRI by time-conditioned neural fields", "authors":[ "Chen, Yunjie", "Wolterink, Jelmer M.", "Neve, Olaf M.", "Romeijn, Stephan R.", "Verbist, Berit M.", "Hensen, Erik F.", "Tao, Qian", "Staring, Marius" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/cyjdswx\/DeepGrowth" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":797 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3934_paper.pdf", "bibtext":"@InProceedings{ Yu_Gyri_MICCAI2024,\n author = { Yu, Xiaowei and Zhang, Lu and Cao, Chao and Chen, Tong and Lyu, Yanjun and Zhang, Jing and Liu, Tianming and Zhu, Dajiang },\n title = { { Gyri vs. Sulci: Core-Periphery Organization in Functional Brain Networks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"The human cerebral cortex is highly convoluted into convex gyri and concave sulci. It has been demonstrated that gyri and sulci are significantly different in their anatomy, connectivity, and function: besides exhibiting opposite shape patterns, long-distance axonal fibers connected to gyri are much denser than those connected to sulci, and neural signals on gyri are more complex in low-frequency while sulci are more complex in high-frequency. 
Although accumulating evidence shows significant differences between gyri and sulci, their primary roles in brain function have not been elucidated yet. To solve this fundamental problem, we design a novel Twin-Transformer framework to unveil the unique functional roles of gyri and sulci as well as their relationship in the whole brain function. Our Twin-Transformer framework adopts two structure-identical (twin) Transformers to disentangle spatial-temporal patterns of functional brain networks: one focuses on the spatial patterns and the other is on temporal patterns. The spatial transformer takes the spatially divided patches and generates spatial patterns, while the temporal transformer takes the temporally split patches and produces temporal patterns. We validated our Twin-Transformer on the HCP task-fMRI dataset, for the first time, to elucidate the different roles of gyri and sulci in brain function. Our results suggest that gyri and sulci could work together in a core-periphery network manner, that is, gyri could serve as core networks for information gathering and distributing, while sulci could serve as periphery networks for specific local information processing. These findings have shed new light on our fundamental understanding of the brain\u2019s basic structural and functional mechanisms.", "title":"Gyri vs. Sulci: Core-Periphery Organization in Functional Brain Networks", "authors":[ "Yu, Xiaowei", "Zhang, Lu", "Cao, Chao", "Chen, Tong", "Lyu, Yanjun", "Zhang, Jing", "Liu, Tianming", "Zhu, Dajiang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":798 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1056_paper.pdf", "bibtext":"@InProceedings{ Yu_LanguageEnhanced_MICCAI2024,\n author = { Yu, Jianxun and Hu, Qixin and Jiang, Meirui and Wang, Yaning and Wong, Chin Ting and Wang, Jing and Zhang, Huimao and Dou, Qi },\n title = { { Language-Enhanced Local-Global Aggregation Network for Multi-Organ Trauma Detection } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Abdominal trauma is one of the leading causes of death in the elderly population and increasingly poses a global challenge. However, interpreting CT scans for abdominal trauma is considerably challenging for deep learning models. Trauma may exist in various organs presenting different shapes and morphologies. In addition, a thorough comprehension of visual cues and various types of trauma is essential, demanding a high level of domain expertise. To address these issues, this paper introduces a language-enhanced local-global aggregation network that aims to fully utilize both global contextual information and local organ-specific information inherent in images for accurate trauma detection. Furthermore, the network is enhanced by text embedding from Large Language Models (LLM). This LLM-based text embedding possesses substantial medical knowledge, enabling the model to capture anatomical relationships of intra-organ and intra-trauma connections. 
We have conducted experiments on one public dataset of RSNA Abdominal Trauma Detection (ATD) and one in-house dataset. Compared with existing state-of-the-art methods, the F1-score of organ-level trauma detection improves from 51.4% to 62.5% when evaluated on the public dataset and from 61.9% to 65.2% on the private cohort, demonstrating the efficacy of our proposed approach for multi-organ trauma detection. Code is available at: https:\/\/github.com\/med-air\/TraumaDet", "title":"Language-Enhanced Local-Global Aggregation Network for Multi-Organ Trauma Detection", "authors":[ "Yu, Jianxun", "Hu, Qixin", "Jiang, Meirui", "Wang, Yaning", "Wong, Chin Ting", "Wang, Jing", "Zhang, Huimao", "Dou, Qi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/med-air\/TraumaDet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":799 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1368_paper.pdf", "bibtext":"@InProceedings{ El_Joint_MICCAI2024,\n author = { El Nahhas, Omar S. M. and W\u00f6lflein, Georg and Ligero, Marta and Lenz, Tim and van Treeck, Marko and Khader, Firas and Truhn, Daniel and Kather, Jakob Nikolas },\n title = { { Joint multi-task learning improves weakly-supervised biomarker prediction in computational pathology } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep Learning (DL) can predict biomarkers directly from digitized cancer histology in a weakly-supervised setting. Recently, the prediction of continuous biomarkers through regression-based DL has seen an increasing interest. Nonetheless, clinical decision making often requires a categorical outcome. Consequently, we developed a weakly-supervised joint multi-task Transformer architecture which has been trained and evaluated on four public patient cohorts for the prediction of two key predictive biomarkers, microsatellite instability (MSI) and homologous recombination deficiency (HRD), trained with auxiliary regression tasks related to the tumor microenvironment. Moreover, we perform a comprehensive benchmark of 16 task balancing approaches for weakly-supervised joint multi-task learning in computational pathology. Using our novel approach, we outperform the state of the art by +7.7% and +4.1% as measured by the area under the receiver operating characteristic, and enhance clustering of latent embeddings by +8% and +5%, for the prediction of MSI and HRD in external cohorts, respectively.", "title":"Joint multi-task learning improves weakly-supervised biomarker prediction in computational pathology", "authors":[ "El Nahhas, Omar S. 
M.", "W\u00f6lflein, Georg", "Ligero, Marta", "Lenz, Tim", "van Treeck, Marko", "Khader, Firas", "Truhn, Daniel", "Kather, Jakob Nikolas" ], "id":"Conference", "arxiv_id":"2403.03891", "GitHub":[ "https:\/\/github.com\/KatherLab\/joint-mtl-cpath" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":800 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2178_paper.pdf", "bibtext":"@InProceedings{ Ans_Algorithmic_MICCAI2024,\n author = { Ansari, Faizanuddin and Chakraborti, Tapabrata and Das, Swagatam },\n title = { { Algorithmic Fairness in Lesion Classification by Mitigating Class Imbalance and Skin Tone Bias } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning models have shown considerable promise in the classification of skin lesions. However, a notable challenge arises from their inherent bias towards dominant skin tones and the issue of imbalanced class representation. This study introduces a novel data augmentation technique designed to address these limitations. Our approach harnesses contextual information from the prevalent class to synthesize various samples representing minority classes. Using a mixup-based algorithm guided by an adaptive sampler, our method effectively tackles bias and class imbalance issues. The adaptive sampler dynamically adjusts sampling probabilities based on the network\u2019s meta-set performance, enhancing overall accuracy. Our research demonstrates the efficacy of this approach in mitigating skin tone bias and achieving robust lesion classification across a spectrum of diverse skin colors from two distinct benchmark datasets, offering promising implications for improving dermatological diagnostic systems.", "title":"Algorithmic Fairness in Lesion Classification by Mitigating Class Imbalance and Skin Tone Bias", "authors":[ "Ansari, Faizanuddin", "Chakraborti, Tapabrata", "Das, Swagatam" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/fa-submit\/Submission_M" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":801 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0928_paper.pdf", "bibtext":"@InProceedings{ Tia_PANS_MICCAI2024,\n author = { Tian, Qingyao and Chen, Zhen and Liao, Huai and Huang, Xinyan and Yang, Bingyu and Li, Lujie and Liu, Hongbin },\n title = { { PANS: Probabilistic Airway Navigation System for Real-time Robust Bronchoscope Localization } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate bronchoscope localization is essential for pulmonary interventions, by providing six degrees of freedom (DOF) in airway navigation. 
However, the robustness of current vision-based methods is often compromised in clinical practice, and they struggle to perform in real-time and to generalize across cases unseen during training. To overcome these challenges, we propose a novel Probabilistic Airway Navigation System (PANS), leveraging Monte-Carlo method with pose hypotheses and likelihoods to achieve robust and real-time bronchoscope localization. Specifically, our PANS incorporates diverse visual representations (e.g., odometry and landmarks) by leveraging two key modules, including the Depth-based Motion Inference (DMI) and the Bronchial Semantic Analysis (BSA). To generate the pose hypotheses of bronchoscope for PANS, we devise the DMI to accurately propagate the estimation of pose hypotheses over time. Moreover, to estimate the accurate pose likelihood, we devise the BSA module by effectively distinguishing between similar bronchial regions in endoscopic images, along with a novel metric to assess the congruence between estimated depth maps and the segmented airway structure. Under this probabilistic formulation, our PANS is capable of achieving the 6-DOF bronchoscope localization with superior accuracy and robustness. Extensive experiments on the collected pulmonary intervention dataset comprising 10 clinical cases confirm the advantage of our PANS over state-of-the-arts, in terms of both robustness and generalization in localizing deeper airway branches and the efficiency of real-time inference. The proposed PANS reveals its potential to be a reliable tool in the operating room, promising to enhance the quality and safety of pulmonary interventions.", "title":"PANS: Probabilistic Airway Navigation System for Real-time Robust Bronchoscope Localization", "authors":[ "Tian, Qingyao", "Chen, Zhen", "Liao, Huai", "Huang, Xinyan", "Yang, Bingyu", "Li, Lujie", "Liu, Hongbin" ], "id":"Conference", "arxiv_id":"2407.05554", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":802 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3894_paper.pdf", "bibtext":"@InProceedings{ Ngu_Volumeoptimal_MICCAI2024,\n author = { Nguyen, Nghi and Hou, Tao and Amico, Enrico and Zheng, Jingyi and Huang, Huajun and Kaplan, Alan D. and Petri, Giovanni and Gon\u0303i, Joaqu\u0301\u0131n and Kaufmann, Ralph and Zhao, Yize and Duong-Tran, Duy and Shen, Li },\n title = { { Volume-optimal persistence homological scaffolds of hemodynamic networks covary with MEG theta-alpha aperiodic dynamics } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Higher-order properties of functional magnetic resonance imaging (fMRI) induced connectivity have been shown to unravel many exclusive topological and dynamical insights beyond pairwise interactions. Nonetheless, whether these fMRI-induced higher-order properties play a role in disentangling other neuroimaging modalities\u2019 insights remains largely unexplored and poorly understood. 
In this work, by analyzing fMRI data from the Human Connectome Project Young Adult dataset using persistent homology, we discovered that the volume-optimal persistence homological scaffolds of fMRI-based functional connectomes exhibited conservative topological reconfigurations from the resting state to attentional task-positive state. Specifically, while reflecting the extent to which each cortical region contributed to functional cycles following different cognitive demands, these reconfigurations were constrained such that the spatial distribution of cavities in the connectome is relatively conserved. Most importantly, such level of contributions covaried with powers of aperiodic activities mostly within the theta-alpha (4-12 Hz) band measured by magnetoencephalography (MEG). This comprehensive result suggests that fMRI-induced hemodynamics and MEG theta-alpha aperiodic activities are governed by the same functional constraints specific to each cortical morpho-structure. Methodologically, our work paves the way toward an innovative computing paradigm in multimodal neuroimaging topological learning. The code for our analyses is provided in https:\/\/github.com\/ngcaonghi\/scaffold_noise.", "title":"Volume-optimal persistence homological scaffolds of hemodynamic networks covary with MEG theta-alpha aperiodic dynamics", "authors":[ "Nguyen, Nghi", "Hou, Tao", "Amico, Enrico", "Zheng, Jingyi", "Huang, Huajun", "Kaplan, Alan D.", "Petri, Giovanni", "Gon\u0303i, Joaqu\u0301\u0131n", "Kaufmann, Ralph", "Zhao, Yize", "Duong-Tran, Duy", "Shen, Li" ], "id":"Conference", "arxiv_id":"2407.05060", "GitHub":[ "https:\/\/github.com\/ngcaonghi\/scaffold_noise" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":803 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0193_paper.pdf", "bibtext":"@InProceedings{ Xu_Poisson_MICCAI2024,\n author = { Xu, Yinsong and Wang, Yipei and Shen, Ziyi and Gayo, Iani J. M. B. and Thorley, Natasha and Punwani, Shonit and Men, Aidong and Barratt, Dean C. and Chen, Qingchao and Hu, Yipeng },\n title = { { Poisson Ordinal Network for Gleason Group Estimation Using Bi-Parametric MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"The Gleason groups serve as the primary histological grading system for prostate cancer, providing crucial insights into the cancer\u2019s potential for growth and metastasis. In clinical practice, pathologists determine the Gleason groups based on specimens obtained from ultrasound-guided biopsies. In this study, we investigate the feasibility of directly estimating the Gleason groups from MRI scans to reduce otherwise required biopsies. We identify two characteristics of this task, ordinality and the resulting dependent yet unknown variances between Gleason groups. In addition to the inter-\/intra-observer variability in a multi-step Gleason scoring process based on the interpretation of Gleason patterns, our MR-based prediction is also subject to specimen sampling variance and, to a lesser degree, varying MR imaging protocols. To address this challenge, we propose a novel Poisson ordinal network (PON). 
PONs model the prediction using a Poisson distribution and leverage Poisson encoding and Poisson focal loss to capture a learnable dependency between ordinal classes (here, Gleason groups), rather than relying solely on the numerical ground-truth (e.g. Gleason Groups 1-5 or Gleason Scores 6-10). To improve this modelling efficacy, PONs also employ contrastive learning with a memory bank to regularise intra-class variance, decoupling the memory requirement of contrastive learning from the batch size. Experimental results based on the images labelled by saturation biopsies from 265 prior-biopsy-blind patients, across two tasks, demonstrate the superiority and effectiveness of our proposed method.", "title":"Poisson Ordinal Network for Gleason Group Estimation Using Bi-Parametric MRI", "authors":[ "Xu, Yinsong", "Wang, Yipei", "Shen, Ziyi", "Gayo, Iani J. M. B.", "Thorley, Natasha", "Punwani, Shonit", "Men, Aidong", "Barratt, Dean C.", "Chen, Qingchao", "Hu, Yipeng" ], "id":"Conference", "arxiv_id":"2407.05796", "GitHub":[ "https:\/\/github.com\/Yinsongxu\/PON.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":804 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3427_paper.pdf", "bibtext":"@InProceedings{ Cob_Improved_MICCAI2024,\n author = { Cobb, Robert and Cook, Gary J. R. and Reader, Andrew J. },\n title = { { Improved Classification Learning from Highly Imbalanced Multi-Label Datasets of Inflamed Joints in [99mTc]Maraciclatide Imaging of Arthritic Patients by Natural Image and Diffusion Model Augmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Gamma camera imaging of the novel radiopharmaceutical [99mTc]maraciclatide can be used to detect inflammation in patients with rheumatoid arthritis. Due to the novelty of this clinical imaging application, data are especially scarce with only one dataset composed of 48 patients available for development of classification models. In this work we classify inflammation in individual joints in the hands of patients using only this small dataset. Our methodology combines diffusion models to augment the available training data for this classification task from an otherwise small and imbalanced dataset. We also explore the use of augmenting with a publicly available natural image dataset in combination with a diffusion model. We use a DenseNet model to classify the inflammation of individual joints in the hand. Our results show that compared to non-augmented baseline classification accuracy, sensitivity, and specificity metrics of 0.79 \u00b1 0.05, 0.50 \u00b1 0.04, and 0.85 \u00b1 0.05, respectively, our method improves model performance for these metrics to 0.91 \u00b1 0.02, 0.79 \u00b1 0.11, 0.93 \u00b1 0.02, respectively. 
When we use an ensemble model and combine natural image augmentation with [99mTc]maraciclatide augmentation, we see performance increase to 0.92 \u00b1 0.02, 0.80 \u00b1 0.09, 0.95 \u00b1 0.02 for accuracy, sensitivity, and specificity, respectively.", "title":"Improved Classification Learning from Highly Imbalanced Multi-Label Datasets of Inflamed Joints in [99mTc]Maraciclatide Imaging of Arthritic Patients by Natural Image and Diffusion Model Augmentation", "authors":[ "Cobb, Robert", "Cook, Gary J. R.", "Reader, Andrew J." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":805 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1760_paper.pdf", "bibtext":"@InProceedings{ Fen_Unified_MICCAI2024,\n author = { Feng, Yidan and Gao, Bingchen and Deng, Sen and Qiu, Anqi and Qin, Jing },\n title = { { Unified Multi-Modal Learning for Any Modality Combinations in Alzheimer\u2019s Disease Diagnosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Our method solves unified multi-modal learning in a diverse and imbalanced setting, which are the key features of medical modalities compared with the extensively-studied ones. Different from existing works that assumed a fixed or maximum number of modalities for multi-modal learning, our model not only manages any missing scenarios but is also capable of handling new modalities and unseen combinations. We argue that the key towards this any-combination model is the proper design of alignment, which should guarantee both modality invariance across diverse inputs and effective modeling of complementarities within the unified metric space. Instead of exact cross-modal alignment, we propose to decouple these two functions into representation-level and task-level alignment, which we empirically show is both dispensable in this task. Moreover, we introduce a tunable modality-agnostic Transformer to unify the representation learning process, which significantly reduces modality-specific parameters and enhances the scalability of our model. 
\nThe experiments have shown that the proposed method enables a single model to handle all possible combinations of the six seen modalities and two new modalities in Alzheimer\u2019s Disease diagnosis, with superior performance on longer combinations.", "title":"Unified Multi-Modal Learning for Any Modality Combinations in Alzheimer\u2019s Disease Diagnosis", "authors":[ "Feng, Yidan", "Gao, Bingchen", "Deng, Sen", "Qiu, Anqi", "Qin, Jing" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":806 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2321_paper.pdf", "bibtext":"@InProceedings{ Wu_Cortical_MICCAI2024,\n author = { Wu, Wenxuan and Qu, Ruowen and Shi, Dongzi and Xiong, Tong and Xu, Xiangmin and Xing, Xiaofen and Zhang, Xin },\n title = { { Cortical Surface Reconstruction from 2D MRI with Segmentation-Constrained Super-Resolution and Representation Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Cortical surface reconstruction typically relies on high-quality 3D brain MRI to establish the structure of the cortex, playing a pivotal role in unveiling neurodevelopmental patterns. However, clinical challenges emerge due to elevated costs and prolonged acquisition times, often resulting in low-quality 2D brain MRI. To optimize the utilization of clinical data for cerebral cortex analysis, we propose a two-stage method for cortical surface reconstruction from 2D brain MRI images. The first stage employs segmentation-constrained MRI super-resolution, concatenating the super-resolution (SR) model and cortical ribbon segmentation model to emphasize cortical regions in the 3D images generated from 2D inputs. In the second stage, two encoders extract features from the original and super-resolution images. Through a shared decoder and the mask-swap module with a multi-process training strategy, cortical surface reconstruction is achieved by mapping features from both the original and super-resolution images to a unified latent space. 
Experiments on the developing Human Connectome Project (dHCP) dataset demonstrate a significant improvement in geometric accuracy over the leading-SR based cortical surface reconstruction methods, facilitating precise cortical surface reconstruction from 2D images.", "title":"Cortical Surface Reconstruction from 2D MRI with Segmentation-Constrained Super-Resolution and Representation Learning", "authors":[ "Wu, Wenxuan", "Qu, Ruowen", "Shi, Dongzi", "Xiong, Tong", "Xu, Xiangmin", "Xing, Xiaofen", "Zhang, Xin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/SCUT-Xinlab\/CSR-from-2D-MRI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":807 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3457_paper.pdf", "bibtext":"@InProceedings{ Ibr_SemiSupervised_MICCAI2024,\n author = { Ibrahim, Yasin and Warr, Hermione and Kamnitsas, Konstantinos },\n title = { { Semi-Supervised Learning for Deep Causal Generative Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Developing models that are capable of answering\nquestions of the form \u201cHow would x change if y had been z?\u201d is fundamental to advancing medical image analysis. Training causal generative models that address such counterfactual questions, though, currently requires that all relevant variables have been observed and that the corresponding labels are available in the training data. However, clinical data may not have complete records for all patients and state of the art causal generative models are unable to take full advantage of this. We thus develop, for the first time, a semi-supervised deep causal generative model that exploits the causal relationships between variables to maximise the use of all available data. We explore this in the setting where each sample is either fully labelled or fully unlabelled, as well as the more clinically realistic case of having different labels missing for each sample. We leverage techniques from causal inference to infer missing values and subsequently generate realistic counterfactuals, even for samples with incomplete labels. 
Code is available at: https:\/\/github.com\/yi249\/ssl-causal", "title":"Semi-Supervised Learning for Deep Causal Generative Models", "authors":[ "Ibrahim, Yasin", "Warr, Hermione", "Kamnitsas, Konstantinos" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/yi249\/ssl-causal" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":808 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0983_paper.pdf", "bibtext":"@InProceedings{ Yan_Inject_MICCAI2024,\n author = { Yang, Ziyuan and Chen, Yingyu and Sun, Mengyu and Zhang, Yi },\n title = { { Inject Backdoor in Measured Data to Jeopardize Full-Stack Medical Image Analysis System } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep learning has achieved remarkable success in the medical domain, which makes it crucial to assess its vulnerabilities in medical systems. This study examines backdoor attack (BA) methods to evaluate the reliability and security of medical image analysis systems. However, most BA methods focus on isolated downstream tasks and are considered post-imaging attacks, missing a comprehensive security assessment of the full-stack medical image analysis systems from data acquisition to analysis. Reconstructing images from measured data for downstream tasks requires complex transformations, which challenge the design of triggers in the measurement domain. Typically, hackers only access measured data in scanners. To tackle this challenge, this paper introduces a novel Learnable Trigger Generation Method~(LTGM) for measured data. This pre-imaging attack method aims to attack the downstream task without compromising the reconstruction process or imaging quality. LTGM employs a trigger function in the measurement domain to inject a learned trigger into the measured data. To avoid the bias from handcrafted knowledge, this trigger is formulated by learning from the gradients of two key tasks: reconstruction and analysis. Crucially, LTGM\u2019s trigger strives to balance its impact on analysis with minimal additional noise and artifacts in the reconstructed images by carefully analyzing gradients from both tasks. Comprehensive experiments have been conducted to demonstrate the vulnerabilities in full-stack medical systems and to validate the effectiveness of the proposed method using the public dataset. 
Our code is available at https:\/\/github.com\/Deep-Imaging-Group\/LTGM.", "title":"Inject Backdoor in Measured Data to Jeopardize Full-Stack Medical Image Analysis System", "authors":[ "Yang, Ziyuan", "Chen, Yingyu", "Sun, Mengyu", "Zhang, Yi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Deep-Imaging-Group\/LTGM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":809 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1949_paper.pdf", "bibtext":"@InProceedings{ Li_VCLIPSeg_MICCAI2024,\n author = { Li, Lei and Lian, Sheng and Luo, Zhiming and Wang, Beizhan and Li, Shaozi },\n title = { { VCLIPSeg: Voxel-wise CLIP-Enhanced model for Semi-Supervised Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Semi-supervised learning has emerged as a critical approach for addressing medical image segmentation with limited annotation, and pseudo labeling-based methods made significant progress for this task. However, the varying quality of pseudo labels poses a challenge to model generalization. In this paper, we propose a Voxel-wise CLIP-enhanced model for semi-supervised medical image Segmentation (VCLIPSeg). Our model incorporates three modules: Voxel-Wise Prompts Module (VWPM), Vision-Text Consistency Module (VTCM), and Dynamic Labeling Branch (DLB). The VWPM integrates CLIP embeddings in a voxel-wise manner, learning the semantic relationships among pixels. The VTCM constrains the image prototype features, reducing the impact of noisy data. The DLB adaptively generates pseudo-labels, effectively leveraging the unlabeled data. Experimental results on the Left Atrial (LA) dataset and Pancreas-CT dataset demonstrate the superiority of our method over state-of-the-art approaches in terms of the Dice score. For instance, it achieves a Dice score of 88.51% using only 5% labeled data from the LA dataset.", "title":"VCLIPSeg: Voxel-wise CLIP-Enhanced model for Semi-Supervised Medical Image Segmentation", "authors":[ "Li, Lei", "Lian, Sheng", "Luo, Zhiming", "Wang, Beizhan", "Li, Shaozi" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":810 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2098_paper.pdf", "bibtext":"@InProceedings{ Li_SDFPlane_MICCAI2024,\n author = { Li, Hao and Shan, Jiwei and Wang, Hesheng },\n title = { { SDFPlane: Explicit Neural Surface Reconstruction of Deformable Tissues } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Three-dimensional reconstruction of soft tissues from stereoscopic surgical videos is crucial for enhancing various medical applications. 
Existing methods often struggle to generate accurate soft tissue geometries or suffer from slow network convergence. To address these challenges, we introduce SDFPlane, an innovative method for fast and precise geometric reconstruction of surgical scenes. This approach efficiently captures scene deformation using a spatial-temporal structure encoder and combines an SDF decoder with a color decoder to accurately model the scene\u2019s geometry and color. Subsequently, we synthesize color images and depth maps with SDF-based volume rendering. Additionally, we implement an error-guided importance sampling strategy, which directs the network\u2019s focus towards areas that are not fully optimized during training. Comparative analysis on multiple public datasets demonstrates that SDFPlane accelerates optimization by over 10\u00d7 compared to existing SDF-based methods while maintaining state-of-the-art rendering quality. Code is available at: https:\/\/github.com\/IRMVLab\/SDFPlane.git", "title":"SDFPlane: Explicit Neural Surface Reconstruction of Deformable Tissues", "authors":[ "Li, Hao", "Shan, Jiwei", "Wang, Hesheng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/IRMVLab\/SDFPlane" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":811 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0782_paper.pdf", "bibtext":"@InProceedings{ Zha_MoreStyle_MICCAI2024,\n author = { Zhao, Haoyu and Dong, Wenhui and Yu, Rui and Zhao, Zhou and Du, Bo and Xu, Yongchao },\n title = { { MoreStyle: Relax Low-frequency Constraint of Fourier-based Image Reconstruction in Generalizable Medical Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"The task of single-source domain generalization (SDG) in medical image segmentation is crucial due to frequent domain shifts in clinical image datasets. To address the challenge of poor generalization across different domains, we introduce a Plug-and-Play module for data augmentation called MoreStyle. MoreStyle diversifies image styles by relaxing low-frequency constraints in Fourier space, guiding the image reconstruction network. With the help of adversarial learning, MoreStyle further expands the style range and pinpoints the most intricate style combinations within latent features. To handle significant style variations, we introduce an uncertainty-weighted loss. This loss emphasizes hard-to-classify pixels resulting only from style shifts while mitigating true hard-to-classify pixels in both MoreStyle-generated and original images. 
Extensive experiments on two widely used benchmarks demonstrate that the proposed MoreStyle effectively helps to achieve good domain generalization ability, and has the potential to further boost the performance of some state-of-the-art SDG methods.", "title":"MoreStyle: Relax Low-frequency Constraint of Fourier-based Image Reconstruction in Generalizable Medical Image Segmentation", "authors":[ "Zhao, Haoyu", "Dong, Wenhui", "Yu, Rui", "Zhao, Zhou", "Du, Bo", "Xu, Yongchao" ], "id":"Conference", "arxiv_id":"2403.11689", "GitHub":[ "https:\/\/github.com\/zhaohaoyu376\/morestyle" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":812 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1001_paper.pdf", "bibtext":"@InProceedings{ Gu_3DDX_MICCAI2024,\n author = { Gu, Yi and Otake, Yoshito and Uemura, Keisuke and Takao, Masaki and Soufi, Mazen and Okada, Seiji and Sugano, Nobuhiko and Talbot, Hugues and Sato, Yoshinobu },\n title = { { 3DDX: Bone Surface Reconstruction from a Single Standard-Geometry Radiograph via Dual-Face Depth Estimation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Radiography is widely used in orthopedics for its affordability and low radiation exposure. 3D reconstruction from a single radiograph, so-called 2D-3D reconstruction, offers the possibility of various clinical applications, but achieving clinically viable accuracy and computational efficiency is still an unsolved challenge. Unlike other areas in computer vision, X-ray imaging\u2019s unique properties, such as ray penetration and standard geometry, have not been fully exploited. We propose a novel approach that simultaneously learns multiple depth maps (front and back surfaces of multiple bones) derived from the X-ray image to computed tomography (CT) registration. The proposed method not only leverages the standard geometry characteristic of X-ray imaging but also enhances the precision of the reconstruction of the whole surface. Our study involved 600 CT and 2651 X-ray images (4 to 5 posed X-ray images per patient), demonstrating our method\u2019s superiority over traditional approaches with a surface reconstruction error reduction from 4.78 mm to 1.96 mm and further to 1.76 mm using higher resolution and pretraining. 
This significant accuracy improvement and enhanced computational efficiency suggest our approach\u2019s potential for clinical application.", "title":"3DDX: Bone Surface Reconstruction from a Single Standard-Geometry Radiograph via Dual-Face Depth Estimation", "authors":[ "Gu, Yi", "Otake, Yoshito", "Uemura, Keisuke", "Takao, Masaki", "Soufi, Mazen", "Okada, Seiji", "Sugano, Nobuhiko", "Talbot, Hugues", "Sato, Yoshinobu" ], "id":"Conference", "arxiv_id":"2409.16702", "GitHub":[ "https:\/\/github.com\/Kayaba-Akihiko\/3DDX" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":813 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2320_paper.pdf", "bibtext":"@InProceedings{ Sha_Fewshot_MICCAI2024,\n author = { Shakeri, Fereshteh and Huang, Yunshi and Silva-Rodriguez, Julio and Bahig, Houda and Tang, An and Dolz, Jose and Ben Ayed, Ismail },\n title = { { Few-shot Adaptation of Medical Vision-Language Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Integrating image and text data through multi-modal learning has emerged as a new approach in medical imaging research, following its successful deployment in computer vision. While considerable efforts have been dedicated to establishing medical foundation models and their zero-shot transfer to downstream tasks, the popular few-shot setting remains relatively unexplored. Following on from the currently strong emergence of this setting in computer vision, we introduce the first structured benchmark for adapting medical vision-language models (VLMs) in a strict few-shot regime and investigate various adaptation strategies commonly used in the context of natural images. Furthermore, we evaluate a simple generalization of the linear-probe adaptation baseline, which seeks an optimal blending of the visual prototypes and text embeddings via learnable class-wise multipliers. Surprisingly, such a text-informed linear probe yields competitive performances in comparison to convoluted prompt-learning and adapter-based strategies, while running considerably faster and accommodating the black-box setting. Our extensive experiments span three different medical modalities and specialized foundation models, nine downstream tasks, and several state-of-the-art few-shot adaptation methods. 
We made our benchmark and code publicly available to trigger further developments in this emergent subject.", "title":"Few-shot Adaptation of Medical Vision-Language Models", "authors":[ "Shakeri, Fereshteh", "Huang, Yunshi", "Silva-Rodriguez, Julio", "Bahig, Houda", "Tang, An", "Dolz, Jose", "Ben Ayed, Ismail" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/FereshteShakeri\/few-shot-MedVLMs" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":814 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4190_paper.pdf", "bibtext":"@InProceedings{ Dha_VLSMAdapter_MICCAI2024,\n author = { Dhakal, Manish and Adhikari, Rabin and Thapaliya, Safal and Khanal, Bishesh },\n title = { { VLSM-Adapter: Finetuning Vision-Language Segmentation Efficiently with Lightweight Blocks } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Foundation Vision-Language Models (VLMs) trained using large-scale open-domain images and text pairs have recently been adapted to develop Vision-Language Segmentation Models (VLSMs) that allow providing text prompts during inference to guide image segmentation. If robust and powerful VLSMs can be built for medical images, it could aid medical professionals in many clinical tasks where they must spend substantial time delineating the target structure of interest. VLSMs for medical images resort to fine-tuning base VLM or VLSM pretrained on open-domain natural image datasets due to fewer annotated medical image datasets; this fine-tuning is resource-consuming and expensive as it usually requires updating all or a significant fraction of the pretrained parameters. Recently, lightweight blocks called adapters have been proposed in VLMs that keep the pretrained model frozen and only train adapters during fine-tuning, substantially reducing the computing resources required. We introduce a novel adapter, VLSM-Adapter, that can fine-tune pretrained vision-language segmentation models using transformer encoders. Our experiments in widely used CLIP-based segmentation models show that with only 3 million trainable parameters, the VLSM-Adapter outperforms state-of-the-art and is comparable to the upper bound end-to-end fine-tuning. 
The source code is available at: https:\/\/github.com\/naamiinepal\/vlsm-adapter.", "title":"VLSM-Adapter: Finetuning Vision-Language Segmentation Efficiently with Lightweight Blocks", "authors":[ "Dhakal, Manish", "Adhikari, Rabin", "Thapaliya, Safal", "Khanal, Bishesh" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/naamiinepal\/vlsm-adapter" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":815 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1475_paper.pdf", "bibtext":"@InProceedings{ Zhu_Multivariate_MICCAI2024,\n author = { Zhu, Zhihong and Cheng, Xuxin and Zhang, Yunyan and Chen, Zhaorun and Long, Qingqing and Li, Hongxiang and Huang, Zhiqi and Wu, Xian and Zheng, Yefeng },\n title = { { Multivariate Cooperative Game for Image-Report Pairs: Hierarchical Semantic Alignment for Medical Report Generation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical report generation (MRG) has great clinical potential, which could relieve radiologists from the heavy workloads of report writing. One of the core challenges in MRG is establishing accurate cross-modal semantic alignment between radiology images and their corresponding reports. Toward this goal, previous methods made great attempts to model from case-level alignment to more fine-grained region-level alignment. Although achieving promising results, they (1) either perform implicit alignment through end-to-end training or heavily rely on extra manual annotations and pre-training tools; (2) neglect to leverage the high-level inter-subject relationship semantic (e.g., disease) alignment. In this paper, we present Hierarchical Semantic Alignment (HSA) for MRG in a unified game theory based framework, which achieves semantic alignment at multiple levels. To solve the first issue, we treat image regions and report words as binary game players and value possible alignment between them, thus achieving explicit and adaptive alignment in a self-supervised manner at region-level. To solve the second issue, we treat images, reports, and diseases as ternary game players, which enforces the cross-modal cluster assignment consistency at disease-level. 
Extensive experiments and analyses on IU-Xray and MIMIC-CXR benchmark datasets demonstrate the superiority of our proposed HSA against various state-of-the-art methods.", "title":"Multivariate Cooperative Game for Image-Report Pairs: Hierarchical Semantic Alignment for Medical Report Generation", "authors":[ "Zhu, Zhihong", "Cheng, Xuxin", "Zhang, Yunyan", "Chen, Zhaorun", "Long, Qingqing", "Li, Hongxiang", "Huang, Zhiqi", "Wu, Xian", "Zheng, Yefeng" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":816 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0195_paper.pdf", "bibtext":"@InProceedings{ Ji_Diffusionbased_MICCAI2024,\n author = { Ji, Wen and Chung, Albert C. S. },\n title = { { Diffusion-based Domain Adaptation for Medical Image Segmentation using Stochastic Step Alignment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"The purpose of this study is to improve Unsupervised Domain Adaptation (UDA) by utilizing intermediate image distributions from the source domain to the target-like domain during the image generation process. However, image generators like Generative Adversarial Networks (GANs) can be regarded as black boxes due to their complex internal workings, and we can only access the final generated image. This limitation makes it unable for UDA to use the available knowledge of the intermediate distribution produced in the generation process when executing domain alignment. To address this problem, we propose a novel UDA framework that utilizes diffusion models to capture and transfer an amount of inter-domain knowledge, thereby mitigating the domain shift problem. A coupled structure-preserved diffusion model is designed to synthesize intermediate images in multiple steps, making the intermediate image distributions accessible. A stochastic step alignment strategy is further developed to align feature distributions, resulting in improved adaptation ability. The effectiveness of the proposed method is demonstrated through experiments on abdominal multi-organ segmentation.", "title":"Diffusion-based Domain Adaptation for Medical Image Segmentation using Stochastic Step Alignment", "authors":[ "Ji, Wen", "Chung, Albert C. S." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":817 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3200_paper.pdf", "bibtext":"@InProceedings{ Raj_Death_MICCAI2024,\n author = { Rajput, Junaid R. 
and Weinmueller, Simon and Endres, Jonathan and Dawood, Peter and Knoll, Florian and Maier, Andreas and Zaiss, Moritz },\n title = { { Death by Retrospective Undersampling - Caveats and Solutions for Learning-Based MRI Reconstructions } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"This study challenges the validity of retrospective undersampling\nin MRI data science by analysis via an MRI physics simulation. We\ndemonstrate that retrospective undersampling, a method often used to\ncreate training data for reconstruction models, can inherently alter MRI\nsignals from their prospective counterparts. This arises from the sequential\nnature of MRI acquisition, where undersampling post-acquisition\neffectively alters the MR sequence and the magnetization dynamic in a\nnon-linear fashion. We show that even in common sequences, this effect\ncan make learning-based reconstructions unreliable. Our simulation provides\nboth, (i) a tool for generating accurate prospective undersampled\ndatasets for analysis of such effects, or for MRI training data augmentation,\nand (ii) a differentiable reconstruction operator that models undersampling\ncorrectly. The provided insights are crucial for the development\nand evaluation of AI-driven acceleration of diagnostic MRI tools.", "title":"Death by Retrospective Undersampling - Caveats and Solutions for Learning-Based MRI Reconstructions", "authors":[ "Rajput, Junaid R.", "Weinmueller, Simon", "Endres, Jonathan", "Dawood, Peter", "Knoll, Florian", "Maier, Andreas", "Zaiss, Moritz" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":818 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1159_paper.pdf", "bibtext":"@InProceedings{ Che_MMQL_MICCAI2024,\n author = { Chen, Qishen and Bian, Minjie and Xu, Huahu },\n title = { { MMQL: Multi-Question Learning for Medical Visual Question Answering } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Medical visual question answering (Med-VQA) aims to answer medical questions with given medical images. Current methods are all designed to answer a single question with its image. Still, medical diagnoses are based on multiple factors, so questions related to the same image should be answered together. This paper proposes a novel multi-question learning method to capture the correlation among questions. Notably, for one image, all related questions are given predictions simultaneously. For those images that already have some questions answered, the answered questions can be used as prompts for better diagnosis. Further, to deal with the error prompts, an entropy-based prompt prune algorithm is designed. A shuffle-based algorithm is designed to make the model less sensitive to the sequence of input questions. 
In the experiment, patient-level accuracy is designed to compare the reliability of the models and reflect the effectiveness of our multi-question learning for Med-VQA. The results show that our method, built on top of recent state-of-the-art Med-VQA models, improves overall accuracy by 3.77% and 4.24% on VQA-RAD and SLAKE, respectively, and patient-level accuracy by 6.90% and 15.63%. The codes are available at: https:\/\/github.com\/shanziSZ\/MMQL.", "title":"MMQL: Multi-Question Learning for Medical Visual Question Answering", "authors":[ "Chen, Qishen", "Bian, Minjie", "Xu, Huahu" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/shanziSZ\/MMQL" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":819 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0179_paper.pdf", "bibtext":"@InProceedings{ Zha_DepthAware_MICCAI2024,\n author = { Zhang, Francis Xiatian and Chen, Shuang and Xie, Xianghua and Shum, Hubert P. H. },\n title = { { Depth-Aware Endoscopic Video Inpainting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Video inpainting fills in corrupted video content with plausible replacements. While recent advances in endoscopic video inpainting have shown potential for enhancing the quality of endoscopic videos, they mainly repair 2D visual information without effectively preserving crucial 3D spatial details for clinical reference. Depth-aware inpainting methods attempt to preserve these details by incorporating depth information. Still, in endoscopic contexts, they face challenges including reliance on pre-acquired depth maps, less effective fusion designs, and ignorance of the fidelity of 3D spatial details.\nTo address them, we introduce a novel Depth-aware Endoscopic Video Inpainting (DAEVI) framework. It features a Spatial-Temporal Guided Depth Estimation module for direct depth estimation from visual features, a Bi-Modal Paired Channel Fusion module for effective channel-by-channel fusion of visual and depth information, and a Depth Enhanced Discriminator to assess the fidelity of the RGB-D sequence comprised of the inpainted frames and estimated depth images.\nExperimental evaluations on established benchmarks demonstrate our framework\u2019s superiority, achieving a 2% improvement in PSNR and a 6% reduction in MSE compared to state-of-the-art methods. Qualitative analyses further validate its enhanced ability to inpaint fine details, highlighting the benefits of integrating depth information into endoscopic inpainting.", "title":"Depth-Aware Endoscopic Video Inpainting", "authors":[ "Zhang, Francis Xiatian", "Chen, Shuang", "Xie, Xianghua", "Shum, Hubert P. H." 
], "id":"Conference", "arxiv_id":"2407.02675", "GitHub":[ "https:\/\/github.com\/FrancisXZhang\/DAEVI" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":820 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1957_paper.pdf", "bibtext":"@InProceedings{ Lia_Leveraging_MICCAI2024,\n author = { Liang, Xiao and Wang, Yin and Wang, Di and Jiao, Zhicheng and Zhong, Haodi and Yang, Mengyu and Wang, Quan },\n title = { { Leveraging Coarse-to-Fine Grained Representations in Contrastive Learning for Differential Medical Visual Question Answering } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Chest X-ray Differential Medical Visual Question Answering (Diff-MedVQA) is a novel multi-modal task designed to answer questions about diseases, especially their differences, based on a main image and a reference image. Compared to the widely explored visual question answering in the general domain, Diff-MedVQA presents two unique issues: (1) variations in medical images are often subtle, and (2) it is impossible for two chest X-rays taken at different times to be at exactly the same view. These issues significantly hinder the ability to answer questions about medical image differences accurately. To address this, we introduce a two-stage framework featuring Coarse-to-Fine Granularity Contrastive Learning. Specifically, our method initially employs an anatomical encoder and a disease classifier to obtain fine-grained visual features of main and reference images. It then integrates the anatomical knowledge graph to strengthen the relationship between anatomical and disease regions, while Multi-Change Captioning transformers identify the subtle differences between main and reference features. During pre-training, Coarse-to-Fine Granularity Contrastive Learning is used to align knowledge enhanced visual differences with keyword features like anatomical parts, symptoms, and diseases. During the Diff-MedVQA Fine-tuning, the model treats the differential features as context-grounded queries, with Language Modeling guiding answer generation. 
Extensive experiments on the MIMIC-CXR-Diff dataset validate the effectiveness of our proposed method.", "title":"Leveraging Coarse-to-Fine Grained Representations in Contrastive Learning for Differential Medical Visual Question Answering", "authors":[ "Liang, Xiao", "Wang, Yin", "Wang, Di", "Jiao, Zhicheng", "Zhong, Haodi", "Yang, Mengyu", "Wang, Quan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/big-white-rabbit\/Coarse-to-Fine-Grained-Contrastive-Learning" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":821 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0608_paper.pdf", "bibtext":"@InProceedings{ Abb_Sparse_MICCAI2024,\n author = { Abboud, Zeinab and Lombaert, Herve and Kadoury, Samuel },\n title = { { Sparse Bayesian Networks: Efficient Uncertainty Quantification in Medical Image Analysis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Efficiently quantifying predictive uncertainty in medical images remains a challenge. While Bayesian neural networks (BNN) offer reliable predictive uncertainty, they require substantial computational resources to train. Although Bayesian approximations such as ensembles have shown promise, they still suffer from high training costs. Existing approaches to reducing computational burden primarily focus on lowering the costs of BNN inference, with limited efforts to improve training efficiency and minimize parameter complexity. This study introduces a training procedure for a sparse (partial) Bayesian network. Our method selectively assigns a subset of parameters as Bayesian by assessing their deterministic saliency through gradient sensitivity analysis. The resulting network combines deterministic and Bayesian parameters, exploiting the advantages of both representations to achieve high task-specific performance and minimize predictive uncertainty. 
Demonstrated on multi-label ChestMNIST for classification and ISIC, LIDC-IDRI for segmentation, our approach achieves competitive performance and predictive uncertainty estimation by reducing Bayesian parameters by over 95%, significantly reducing computational expenses compared to fully Bayesian and ensemble methods.", "title":"Sparse Bayesian Networks: Efficient Uncertainty Quantification in Medical Image Analysis", "authors":[ "Abboud, Zeinab", "Lombaert, Herve", "Kadoury, Samuel" ], "id":"Conference", "arxiv_id":"2406.06946", "GitHub":[ "https:\/\/github.com\/zabboud\/SparseBayesianNetwork" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":822 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2563_paper.pdf", "bibtext":"@InProceedings{ Li_Blind_MICCAI2024,\n author = { Li, Xing and Yang, Yan and Zheng, Hairong and Xu, Zongben },\n title = { { Blind Proximal Diffusion Model for Joint Image and Sensitivity Estimation in Parallel MRI } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Parallel imaging (PI) has demonstrated notable efficiency in accelerating magnetic resonance imaging (MRI) using deep learning techniques. However, these models often face challenges regarding their adaptability and robustness across varying data acquisition.\nIn this work, we introduce a novel joint estimation framework for MR image reconstruction and multi-channel sensitivity maps utilizing denoising diffusion models under blind settings, termed Blind Proximal Diffusion Model in Parallel MRI (BPDM-PMRI). BPDM-PMRI formulates the reconstruction problem as a non-convex optimization task for simultaneous estimation of MR images and sensitivity maps across multiple channels. We employ the proximal alternating linearized minimization (PALM) to iteratively update the reconstructed MR images and sensitivity maps. Distinguished from the traditional proximal operators, our diffusion-based proximal operators provide a more generalizable and stable prior characterization. Once the diffusion model is trained, it can be applied to various sampling trajectories. 
Comprehensive experiments conducted on publicly available MR datasets demonstrate that BPDM-PMRI outperforms existing methods in terms of denoising effectiveness and generalization capability, while keeping clinically acceptable inference times.", "title":"Blind Proximal Diffusion Model for Joint Image and Sensitivity Estimation in Parallel MRI", "authors":[ "Li, Xing", "Yang, Yan", "Zheng, Hairong", "Xu, Zongben" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":823 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1863_paper.pdf", "bibtext":"@InProceedings{ Zha_Hierarchical_MICCAI2024,\n author = { Zhang, Hao and Zhao, Mingyue and Liu, Mingzhu and Luo, Jiejun and Guan, Yu and Zhang, Jin and Xia, Yi and Zhang, Di and Zhou, Xiuxiu and Fan, Li and Liu, Shiyuan and Zhou, S. Kevin },\n title = { { Hierarchical multiple instance learning for COPD grading with relatively specific similarity } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Chronic obstructive pulmonary disease (COPD) is a type of obstructive lung disease characterized by persistent airflow limitation and ranks as the third leading cause of death globally. As a heterogeneous lung disorder, the diversity of COPD phenotypes and the complexity of its pathology pose significant challenges for recognizing its grade. Many existing deep learning models based on 3D CT scans overlook the spatial position information of lesion regions and the correlation within different lesion grades. To this end, we define the COPD grading task as a multiple instance learning (MIL) task and propose a hierarchical multiple instance learning (H-MIL) model. Unlike previous MIL models, our H-MIL model pays more attention to the spatial position information of patches and achieves a fine-grained classification of COPD by extracting patch features in a multi-level and granularity-oriented manner. Furthermore, we recognize the significant correlations within lesions of different grades and propose a Relatively Specific Similarity (RSS) function to capture such relative correlations. We demonstrate that H-MIL achieves better performance than competing methods on an internal dataset comprising 2,142 CT scans. Additionally, we validate the effectiveness of the model architecture and loss design through an ablation study, as well as the robustness of our model on datasets from different centers.", "title":"Hierarchical multiple instance learning for COPD grading with relatively specific similarity", "authors":[ "Zhang, Hao", "Zhao, Mingyue", "Liu, Mingzhu", "Luo, Jiejun", "Guan, Yu", "Zhang, Jin", "Xia, Yi", "Zhang, Di", "Zhou, Xiuxiu", "Fan, Li", "Liu, Shiyuan", "Zhou, S. 
Kevin" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Mars-Zhang123\/H-MIL.git" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":824 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3565_paper.pdf", "bibtext":"@InProceedings{ He_SANGRE_MICCAI2024,\n author = { He, Ying and Miquel, Marc E. and Zhang, Qianni },\n title = { { SANGRE: a Shallow Attention Network Guided by Resolution Expansion for MR Image Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"Magnetic Resonance (MR) imaging plays a vital role in clinical diagnostics and treatment planning, with the accurate segmentation of MR images being of paramount importance. Vision transformers have demonstrated remarkable success in medical image segmentation; however, they fall short in capturing the local context. While images of larger sizes provide broad contextual information, such as shape and texture, training deep learning models on such large images demands additional computational resources. To overcome these challenges, we introduce a shallow attention feature aggregation (SAFA) module to progressively enhance features\u2019 local context and filter out redundant features. Moreover, we use feature interactions in a resolution expansion guidance (REG) module to leverage the wide contextual information from the images at higher resolution, ensuring adequate exploitation of small class features, leading to a more accurate segmentation without a significant increase in FLOPs. The model is evaluated on two dynamic MR datasets for speech and cardiac cases. The proposed model outperforms other state-of-the-art methods. The codes are available at https:\/\/github.com\/Yhe9718\/SANGRE.", "title":"SANGRE: a Shallow Attention Network Guided by Resolution Expansion for MR Image Segmentation", "authors":[ "He, Ying", "Miquel, Marc E.", "Zhang, Qianni" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/Yhe9718\/SANGRE" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":825 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2347_paper.pdf", "bibtext":"@InProceedings{ Yoo_Volumetric_MICCAI2024,\n author = { Yoon, Siyeop and Tivnan, Matthew and Hu, Rui and Wang, Yuang and Son, Young-don and Wu, Dufan and Li, Xiang and Kim, Kyungsang and Li, Quanzheng },\n title = { { Volumetric Conditional Score-based Residual Diffusion Model for PET\/MR Denoising } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"PET imaging is a powerful modality offering quantitative assessments of molecular and physiological processes. 
The necessity for PET denoising arises from the intrinsic high noise levels in PET imaging, which can significantly hinder the accurate interpretation and quantitative analysis of the scans. With advances in deep learning techniques, diffusion model-based PET denoising techniques have shown remarkable performance improvement. However, these models often face limitations when applied to volumetric data. Additionally, many existing diffusion models do not adequately consider the unique characteristics of PET imaging, such as its 3D volumetric nature, leading to the potential loss of anatomic consistency. Our Conditional Score-based Residual Diffusion (CSRD) model addresses these issues by incorporating a refined score function and 3D patch-wise training strategy, optimizing the model for efficient volumetric PET denoising. The CSRD model significantly lowers computational demands and expedites the denoising process. By effectively integrating volumetric data from PET and MRI scans, the CSRD model maintains spatial coherence and anatomical detail. Lastly, we demonstrate that the CSRD model achieves superior denoising performance in both qualitative and quantitative evaluations while maintaining image details and outperforms existing state-of-the-art methods. Our code is available at: \\url{https:\/\/github.com\/siyeopyoon\/Residual-Diffusion-Model-for-PET-MR-Denoising}", "title":"Volumetric Conditional Score-based Residual Diffusion Model for PET\/MR Denoising", "authors":[ "Yoon, Siyeop", "Tivnan, Matthew", "Hu, Rui", "Wang, Yuang", "Son, Young-don", "Wu, Dufan", "Li, Xiang", "Kim, Kyungsang", "Li, Quanzheng" ], "id":"Conference", "arxiv_id":"2410.00184", "GitHub":[ "https:\/\/github.com\/siyeopyoon\/Residual-Diffusion-Model-for-PET-MR-Denoising" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":826 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2121_paper.pdf", "bibtext":"@InProceedings{ Xio_TAKT_MICCAI2024,\n author = { Xiong, Conghao and Lin, Yi and Chen, Hao and Zheng, Hao and Wei, Dong and Zheng, Yefeng and Sung, Joseph J. Y. and King, Irwin },\n title = { { TAKT: Target-Aware Knowledge Transfer for Whole Slide Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Knowledge transfer from a source to a target domain is vital for whole slide image classification, given the limited dataset size due to high annotation costs. However, domain shift and task discrepancy between datasets can impede this process. To address these issues, we propose a Target-Aware Knowledge Transfer framework using a teacher-student paradigm, enabling a teacher model to learn common knowledge from both domains by actively incorporating unlabelled target images into the teacher model training. The teacher bag features are subsequently adapted to supervise the student model training on the target domain. Despite incorporating the target features during training, the teacher model tends to neglect them under inherent domain shift and task discrepancy. 
To alleviate this, we introduce a target-aware feature alignment module to establish a transferable latent relationship between the source and target features by solving an optimal transport problem. Experimental results show that models employing knowledge transfer outperform those trained from scratch, and our method achieves state-of-the-art performance among other knowledge transfer methods on various datasets, including TCGA-RCC, TCGA-NSCLC, and Camelyon16. Codes are released at https:\/\/github.com\/BearCleverProud\/TAKT.", "title":"TAKT: Target-Aware Knowledge Transfer for Whole Slide Image Classification", "authors":[ "Xiong, Conghao", "Lin, Yi", "Chen, Hao", "Zheng, Hao", "Wei, Dong", "Zheng, Yefeng", "Sung, Joseph J. Y.", "King, Irwin" ], "id":"Conference", "arxiv_id":"2303.05780", "GitHub":[ "https:\/\/github.com\/BearCleverProud\/TAKT" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":827 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0394_paper.pdf", "bibtext":"@InProceedings{ Cha_Decoding_MICCAI2024,\n author = { Chakraborty, Souradeep and Gupta, Rajarsi and Yaskiv, Oksana and Friedman, Constantin and Sheuka, Natallia and Perez, Dana and Friedman, Paul and Zelinsky, Gregory and Saltz, Joel and Samaras, Dimitris },\n title = { { Decoding the visual attention of pathologists to reveal their level of expertise } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present a method for classifying the expertise of a pathologist based on how they allocated their attention during a cancer reading. We engage this decoding task by developing a novel method for predicting the attention of pathologists as they read Whole-Slide Images (WSIs) of prostate tissue and make cancer grade classifications. Our ground truth measure of a pathologists\u2019 attention is the x, y and z (magnification) movement of their viewport as they navigated through WSIs during readings, and to date we have the attention behavior of 43 pathologists reading 123 WSIs. These data revealed that specialists have higher agreement in both their attention and cancer grades compared to general pathologists and residents, suggesting that sufficient information may exist in their attention behavior to classify their expertise level. To attempt this, we trained a transformer-based model to predict the visual attention heatmaps of resident, general, and specialist (Genitourinary) pathologists during Gleason grading. Based solely on a pathologist\u2019s attention during a reading, our model was able to predict their level of expertise with 75.3%, 56.1%, and 77.2% accuracy, respectively, better than chance and baseline models. Our model therefore enables a pathologist\u2019s expertise level to be easily and objectively evaluated, important for pathology training and competency assessment. 
Tools developed from our model could be used to help pathology trainees learn how to read WSIs like an expert.", "title":"Decoding the visual attention of pathologists to reveal their level of expertise", "authors":[ "Chakraborty, Souradeep", "Gupta, Rajarsi", "Yaskiv, Oksana", "Friedman, Constantin", "Sheuka, Natallia", "Perez, Dana", "Friedman, Paul", "Zelinsky, Gregory", "Saltz, Joel", "Samaras, Dimitris" ], "id":"Conference", "arxiv_id":"2403.17255", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":828 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2277_paper.pdf", "bibtext":"@InProceedings{ Das_SimBrainNet_MICCAI2024,\n author = { Das Chakladar, Debashis and Simistira Liwicki, Foteini and Saini, Rajkumar },\n title = { { SimBrainNet: Evaluating Brain Network Similarity for Attention Disorders } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Electroencephalography (EEG)-based attention disorder research seeks to understand brain activity patterns associated with attention. Previous studies have mainly focused on identifying brain regions involved in cognitive processes or classifying Attention-Deficit Hyperactivity Disorder (ADHD) and control subjects. However, analyzing effective brain connectivity networks for specific attentional processes and comparing them has not been explored. Therefore, in this study, we propose multivariate transfer entropy-based connectivity networks for cognitive events and introduce a new similarity measure, \u201cSimBrainNet\u201d, to assess these networks. A high similarity score suggests similar brain dynamics during cognitive events, indicating less attention variability. Our experiment involves 12 individuals with attention disorders (7 children and 5 adolescents). Notably, child participants exhibit lower similarity scores compared to adolescents, indicating greater changes in attention. We found strong connectivity patterns in the left pre-frontal cortex for adolescents compared to children. 
Our study highlights the changes in attention levels across various cognitive events, offering insights into the underlying cognitive mechanisms, brain dynamics, and potential deficits in individuals with this disorder.", "title":"SimBrainNet: Evaluating Brain Network Similarity for Attention Disorders", "authors":[ "Das Chakladar, Debashis", "Simistira Liwicki, Foteini", "Saini, Rajkumar" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/DDasChakladar\/SimBrainNet" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":829 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1903_paper.pdf", "bibtext":"@InProceedings{ Mai_Dynamic_MICCAI2024,\n author = { Maitre, Thomas and Bretin, Elie and Phan, Romain and Ducros, Nicolas and Sdika, Micha\u00ebl },\n title = { { Dynamic Single-Pixel Imaging on an Extended Field of View without Warping the Patterns } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"A single-pixel camera is a spatial-multiplexing device that reconstructs an image from a sequence of projections of the scene onto some patterns. This architecture is used, for example, to assist neurosurgery with hyperspectral imaging. However, capturing dynamic scenes is very challenging: as the different projections measure different frames of the scene, standard reconstruction approaches suffer from strong motion artifacts. This paper presents a general framework to reconstruct a moving scene with two main contributions. First, we extend the field of view of the camera beyond that defined by the spatial light modulator, which dramatically reduces the model mismatch. Second, we propose to build the dynamic system matrix without warping the patterns, effectively dismissing discretization errors. Numerical experiments show that both our contributions are necessary for an artifact-free reconstruction. 
The influence of a reduced measurement set, as well as robustness to noise and to motion errors, was also evaluated.", "title":"Dynamic Single-Pixel Imaging on an Extended Field of View without Warping the Patterns", "authors":[ "Maitre, Thomas", "Bretin, Elie", "Phan, Romain", "Ducros, Nicolas", "Sdika, Micha\u00ebl" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/openspyrit\/spyrit" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":830 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1602_paper.pdf", "bibtext":"@InProceedings{ Wan_Learnable_MICCAI2024,\n author = { Wang, Yao and Chen, Jiahao and Huang, Wenjian and Dong, Pei and Qian, Zhen },\n title = { { Learnable Skeleton-Based Medical Landmark Estimation with Graph Sparsity and Fiedler Regularizations } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recent developments in heatmap regression-based models have been central to anatomical landmark detection, yet their efficiency is often limited due to the lack of skeletal structure constraints. Despite the notable use of graph convolution networks (GCNs) in human pose estimation and facial landmark detection, manual construction of skeletal structures remains prevalent, presenting challenges in medical contexts with numerous non-intuitive structures. This paper introduces an innovative skeleton construction model for GCNs, integrating graph sparsity and Fiedler regularization, diverging from traditional manual methods. We provide both theoretical validation and a practical implementation of our model, demonstrating its real-world efficacy. Additionally, we have developed two new medical datasets tailored for this research, along with testing on an open dataset. Our results consistently show our method\u2019s superior performance and versatility in anatomical landmark detection, establishing a new benchmark in the field, as evidenced by extensive testing across diverse datasets.", "title":"Learnable Skeleton-Based Medical Landmark Estimation with Graph Sparsity and Fiedler Regularizations", "authors":[ "Wang, Yao", "Chen, Jiahao", "Huang, Wenjian", "Dong, Pei", "Qian, Zhen" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":831 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4115_paper.pdf", "bibtext":"@InProceedings{ Nat_Pixel2Mechanics_MICCAI2024,\n author = { Natarajan, Sai and Mu\u00f1oz-Moya, Estefano and Ruiz Wills, Carlos and Piella, Gemma and Noailly, J\u00e9r\u00f4me and Humbert, Ludovic and Gonz\u00e1lez Ballester, Miguel A. 
},\n title = { { Pixel2Mechanics: Automated biomechanical simulations of high-resolution intervertebral discs from anisotropic MRIs } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Intervertebral disc (IVD) degeneration poses demanding challenges for improved diagnosis and treatment personalization. Biomechanical simulations bridge the gap between phenotypes and functional mechanobiology. However, personalized IVD modelling is hindered by complex manual workflows to obtain meshes suitable for biomechanical analysis using clinical MR data. This study proposes Pixel2Mechanics: a novel pipeline for biomechanical finite element (FE) simulation of high-resolution IVD meshes from low-resolution clinical MRI. We use our geometrical deep learning framework incorporating cross-level feature fusion to generate meshes of the lumbar Annuli Fibrosis (AF) and Nuclei Pulposi (NP), from the L1-L2 to L4-L5 IVD. Further, we improve our framework by proposing a novel optimization method based on differentiable rendering. Next, a custom morphing algorithm based on the Bayesian Coherent Point Drift++ approach generates volumetric FE meshes from the surface meshes, preserving tissue topology through the whole cohort while capturing shape specificities. Daily load simulations on these FE models were evaluated in terms of mechanical responses in three volumes within the IVD: the center of the NP and the two transition zones (posterior and anterior). These were compared with the results obtained with a manual segmentation procedure. This study delivers a fully automated pipeline performing patient-personalized simulations of L1-L2 to L4-L5 IVD spine levels from clinical MRIs. It facilitates functional modeling and further exploration of normal and pathological discs while minimizing manual intervention. These features position the pipeline as a promising candidate for future clinical integration. Our data & code will be made available at: http:\/\/www.pixel2mechanics.github.io", "title":"Pixel2Mechanics: Automated biomechanical simulations of high-resolution intervertebral discs from anisotropic MRIs", "authors":[ "Natarajan, Sai", "Mu\u00f1oz-Moya, Estefano", "Ruiz Wills, Carlos", "Piella, Gemma", "Noailly, J\u00e9r\u00f4me", "Humbert, Ludovic", "Gonz\u00e1lez Ballester, Miguel A." 
], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":832 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3036_paper.pdf", "bibtext":"@InProceedings{ Sha_Hierarchical_MICCAI2024,\n author = { Sha, Qingrui and Sun, Kaicong and Xu, Mingze and Li, Yonghao and Xue, Zhong and Cao, Xiaohuan and Shen, Dinggang },\n title = { { Hierarchical Symmetric Normalization Registration using Deformation-Inverse Network } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Most existing deep learning-based medical image registration methods estimate a single-directional displacement field between the moving and fixed image pair, resulting in registration errors when there are substantial differences between the to-be-registered image pairs. To solve this issue, we propose a symmetric normalization network to estimate the deformations in a bi-directional way. Specifically, our method learns two bi-directional half-way displacement fields, which warp the moving and fixed images to their mean space. Besides, a symmetric magnitude constraint is designed in the mean space to ensure precise registration. Additionally, a deformation-inverse network is employed to obtain the inverse of the displacement field, which is applied to the inference pipeline to compose the final end-to-end displacement field between the moving and fixed images. During inference, our method first estimates the two half-way displacement fields and then composes one half-way displacement field with the inverse of another half. Moreover, we adopt a multi-level strategy to hierarchically perform registration, for gradually aligning images to their mean space, thereby improving accuracy and smoothness. Experimental results on two datasets demonstrate that the proposed method improves registration performance compared with state-of-the-art algorithms. 
Our code is available at https:\/\/github.com\/QingRui-Sha\/HSyN.", "title":"Hierarchical Symmetric Normalization Registration using Deformation-Inverse Network", "authors":[ "Sha, Qingrui", "Sun, Kaicong", "Xu, Mingze", "Li, Yonghao", "Xue, Zhong", "Cao, Xiaohuan", "Shen, Dinggang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/QingRui-Sha\/HSyN" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":833 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4218_paper.pdf", "bibtext":"@InProceedings{ Bea_Towards_MICCAI2024,\n author = { Beaudet, Karl-Philippe and Karargyris, Alexandros and El Hadramy, Sidaty and Cotin, St\u00e9phane and Mazellier, Jean-Paul and Padoy, Nicolas and Verde, Juan },\n title = { { Towards Real-time Intrahepatic Vessel Identification in Intraoperative Ultrasound-Guided Liver Surgery } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"While laparoscopic liver resection is less prone to complications and maintains patient outcomes compared to traditional open surgery, its complexity hinders widespread adoption due to challenges in representing the liver\u2019s internal structure. Laparoscopic intraoperative ultrasound offers efficient, cost-effective, and radiation-free guidance. Our objective is to aid physicians in identifying internal liver structures using laparoscopic intraoperative ultrasound. We propose a patient-specific approach using preoperative 3D ultrasound liver volume to train a deep learning model for real-time identification of portal tree and branch structures. Our personalized AI model, validated on ex vivo swine livers, achieved superior precision (0.95) and recall (0.93) compared to surgeons, laying groundwork for precise vessel identification in ultrasound-based liver resection. Its adaptability and potential clinical impact promise to advance surgical interventions and improve patient care.", "title":"Towards Real-time Intrahepatic Vessel Identification in Intraoperative Ultrasound-Guided Liver Surgery", "authors":[ "Beaudet, Karl-Philippe", "Karargyris, Alexandros", "El Hadramy, Sidaty", "Cotin, St\u00e9phane", "Mazellier, Jean-Paul", "Padoy, Nicolas", "Verde, Juan" ], "id":"Conference", "arxiv_id":"2410.03420", "GitHub":[ "https:\/\/github.com\/CAMMA-public\/Lupin\/" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":834 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3276_paper.pdf", "bibtext":"@InProceedings{ Sam_LS_MICCAI2024,\n author = { Sambyal, Abhishek Singh and Niyaz, Usma and Shrivastava, Saksham and Krishnan, Narayanan C. and Bathula, Deepti R. 
},\n title = { { LS+: Informed Label Smoothing for Improving Calibration in Medical Image Classification } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15010 },\n month = {October},\n pages = { pending },\n }", "abstract":"Deep Neural Networks (DNNs) exhibit exceptional performance in various tasks; however, their susceptibility to miscalibration poses challenges in healthcare applications, impacting reliability and trustworthiness. Label smoothing, which prefers soft targets based on a uniform distribution over labels, is a widely used strategy to improve model calibration. We propose an improved strategy, Label Smoothing Plus (LS+), which uses a class-specific prior, estimated from the validation set, to account for the current model calibration level. We evaluate the effectiveness of our approach by comparing it with state-of-the-art methods on three benchmark medical imaging datasets, using two different architectures and several performance and calibration metrics for the classification task. Experimental results show a notable reduction in calibration error metrics with a nominal improvement in performance compared to other approaches, suggesting that our proposed method provides more reliable prediction probabilities. Code is available at https:\/\/github.com\/abhisheksambyal\/lsplus.", "title":"LS+: Informed Label Smoothing for Improving Calibration in Medical Image Classification", "authors":[ "Sambyal, Abhishek Singh", "Niyaz, Usma", "Shrivastava, Saksham", "Krishnan, Narayanan C.", "Bathula, Deepti R." ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/abhisheksambyal\/lsplus" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":835 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1615_paper.pdf", "bibtext":"@InProceedings{ Bae_OCL_MICCAI2024,\n author = { Baek, Seunghun and Sim, Jaeyoon and Wu, Guorong and Kim, Won Hwa },\n title = { { OCL: Ordinal Contrastive Learning for Imputating Features with Progressive Labels } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurately discriminating progressive stages of Alzheimer\u2019s Disease (AD) is crucial for early diagnosis and prevention. This often involves multiple imaging modalities to understand the complex pathology of AD; however, acquiring a complete set of images is challenging due to high cost and burden for subjects. As a result, missing data become inevitable, leading to limited sample sizes and decreased precision in downstream analyses. To tackle this challenge, we introduce a holistic imaging feature imputation method that enables leveraging diverse imaging features while retaining all subjects. The proposed method comprises two networks: 1) An encoder to extract modality-independent embeddings and 2) A decoder to reconstruct the original measures conditioned on their imaging modalities. The encoder includes a novel ordinal contrastive loss, which aligns samples in the embedding space according to the progression of AD. 
We also maximize modality-wise coherence of embeddings within each subject, in conjunction with domain adversarial training algorithms, to further enhance alignment between different imaging modalities. The proposed method promotes our holistic imaging feature imputation across various modalities in the shared embedding space. In the experiments, we show that our networks deliver favorable results for statistical analysis and classification against imputation baselines with Alzheimer\u2019s Disease Neuroimaging Initiative (ADNI) study.", "title":"OCL: Ordinal Contrastive Learning for Imputating Features with Progressive Labels", "authors":[ "Baek, Seunghun", "Sim, Jaeyoon", "Wu, Guorong", "Kim, Won Hwa" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":836 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0582_paper.pdf", "bibtext":"@InProceedings{ Ber_Topologically_MICCAI2024,\n author = { Berger, Alexander H. and Lux, Laurin and Stucki, Nico and B\u00fcrgin, Vincent and Shit, Suprosanna and Banaszak, Anna and Rueckert, Daniel and Bauer, Ulrich and Paetzold, Johannes C. },\n title = { { Topologically faithful multi-class segmentation in medical images } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Topological accuracy in medical image segmentation is a highly important property for downstream applications such as network analysis and flow modeling in vessels or cell counting. Recently, significant methodological advancements have brought well-founded concepts from algebraic topology to binary segmentation. However, these approaches have been underexplored in multi-class segmentation scenarios, where topological errors are common. We propose a general loss function for topologically faithful multi-class segmentation extending the recent Betti matching concept, which is based on induced matchings of persistence barcodes. We project the N-class segmentation problem to N single-class segmentation tasks, which allows us to use 1-parameter persistent homology, making training of neural networks computationally feasible. We validate our method on a comprehensive set of four medical datasets with highly variant topological characteristics. Our loss formulation significantly enhances topological correctness in cardiac, cell, artery-vein, and Circle of Willis segmentation.", "title":"Topologically faithful multi-class segmentation in medical images", "authors":[ "Berger, Alexander H.", "Lux, Laurin", "Stucki, Nico", "B\u00fcrgin, Vincent", "Shit, Suprosanna", "Banaszak, Anna", "Rueckert, Daniel", "Bauer, Ulrich", "Paetzold, Johannes C." 
], "id":"Conference", "arxiv_id":"2403.11001", "GitHub":[ "https:\/\/github.com\/AlexanderHBerger\/multiclass-BettiMatching" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":837 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3550_paper.pdf", "bibtext":"@InProceedings{ Meu_NeuroConText_MICCAI2024,\n author = { Meudec, Rapha\u00ebl and Ghayem, Fateme and Dock\u00e8s, J\u00e9r\u00f4me and Wassermann, Demian and Thirion, Bertrand },\n title = { { NeuroConText: Contrastive Text-to-Brain Mapping for Neuroscientific Literature } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Neuroscientific literature faces challenges in reliability due to limited statistical power, reproducibility issues, and inconsistent terminology. To address these challenges, we introduce NeuroConText, the first brain meta-analysis model that uses a contrastive approach to enhance the association between textual data and brain activation coordinates reported in 20K neuroscientific articles from PubMed Central. NeuroConText integrates the capabilities of recent advancements in large language models (LLM) such as Mistral-7B instead of traditional bag-of-words methods, to better capture the text semantic, and improve the association with brain activations. Our method is adapted to processing neuroscientific textual data regardless of length and generalizes well across various textual content\u2014titles, abstracts, and full-body\u2014. Our experiments show that NeuroConText significantly outperforms state-of-the-art methods by a threefold increase in linking text to brain activations regarding recall@10. 
Also, NeuroConText allows decoding brain images from text latent representations, successfully maintaining the quality of brain image reconstruction compared to the state-of-the-art.", "title":"NeuroConText: Contrastive Text-to-Brain Mapping for Neuroscientific Literature", "authors":[ "Meudec, Rapha\u00ebl", "Ghayem, Fateme", "Dock\u00e8s, J\u00e9r\u00f4me", "Wassermann, Demian", "Thirion, Bertrand" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/ghayem\/NeuroConText" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":838 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3487_paper.pdf", "bibtext":"@InProceedings{ Zal_Improving_MICCAI2024,\n author = { Zalevskyi, Vladyslav and Sanchez, Thomas and Roulet, Margaux and Aviles Verdera, Jordina and Hutter, Jana and Kebiri, Hamza and Bach Cuadra, Meritxell },\n title = { { Improving cross-domain brain tissue segmentation in fetal MRI with synthetic data } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Segmentation of fetal brain tissue from magnetic resonance imaging (MRI) plays a crucial role in the study of in-utero neurodevelopment. However, automated tools face substantial domain shift challenges as they must be robust to highly heterogeneous clinical data, often limited in numbers and lacking annotations.\nIndeed, high variability of the fetal brain morphology, MRI acquisition parameters, and super-resolution reconstruction (SR) algorithms adversely affect the model\u2019s performance when evaluated out-of-domain.\nIn this work, we introduce FetalSynthSeg, a domain randomization method to segment fetal brain MRI, inspired by SynthSeg. Our results show that models trained solely on synthetic data outperform models trained on real data in out-of-domain settings, validated on a 120-subject cross-domain dataset. Furthermore, we extend our evaluation to 40 subjects acquired using low-field (0.55T) MRI and reconstructed with novel SR models, showcasing robustness across different magnetic field strengths and SR algorithms. 
Leveraging a generative synthetic approach, we tackle the domain shift problem in fetal brain MRI and offer compelling prospects for applications in fields with limited and highly heterogeneous data.", "title":"Improving cross-domain brain tissue segmentation in fetal MRI with synthetic data", "authors":[ "Zalevskyi, Vladyslav", "Sanchez, Thomas", "Roulet, Margaux", "Aviles Verdera, Jordina", "Hutter, Jana", "Kebiri, Hamza", "Bach Cuadra, Meritxell" ], "id":"Conference", "arxiv_id":"2403.15103", "GitHub":[ "https:\/\/github.com\/Medical-Image-Analysis-Laboratory\/FetalSynthSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":839 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3253_paper.pdf", "bibtext":"@InProceedings{ Guo_BGDiffSeg_MICCAI2024,\n author = { Guo, Yilin and Cai, Qingling },\n title = { { BGDiffSeg: a Fast Diffusion Model for Skin Lesion Segmentation via Boundary Enhancement and Global Recognition Guidance } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15009 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the study of skin lesion segmentation, models based on convolution neural networks (CNN) and vision transformers (ViT) have been extensively explored but face challenges in capturing fine details near boundaries. The advent of Diffusion Probabilistic Model (DPM) offers significant promise for this task which demands precise boundary segmentation. In this study, we propose BGDiffSeg, a novel skin lesion segmentation model utilizing a wavelet-transform-based diffusion approach to speed up training and denoising, along with specially designed Diffusion Boundary Enhancement Module (DBEM) and Interactive Bidirectional Attention Module (IBAM) to enhance segmentation accuracy. DBEM enhances boundary features in the diffusion process by integrating extracted boundary information into the decoder. Concurrently, IBAM facilitates dynamic interactions between conditional and generated images at the feature level, thus enhancing the global recognition of target area boundaries. Comprehensive experiments on the ISIC 2016, ISIC 2017, and ISIC 2018 datasets demonstrate BGDiffSeg\u2019s superiority in precision and clarity under limited computational resources and inference time, outperforming existing state-of-the-art methods. 
Our code will be available at https:\/\/github.com\/erlingzz\/BGDiffSeg.", "title":"BGDiffSeg: a Fast Diffusion Model for Skin Lesion Segmentation via Boundary Enhancement and Global Recognition Guidance", "authors":[ "Guo, Yilin", "Cai, Qingling" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/erlingzz\/BGDiffSeg" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":840 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0622_paper.pdf", "bibtext":"@InProceedings{ Zha_Prompting_MICCAI2024,\n author = { Zhang, Ling and Yun, Boxiang and Xie, Xingran and Li, Qingli and Li, Xinxing and Wang, Yan },\n title = { { Prompting Whole Slide Image Based Genetic Biomarker Prediction } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Prediction of genetic biomarkers, e.g., microsatellite instability and BRAF in colorectal cancer is crucial for clinical decision making. In this paper, we propose a whole slide image (WSI) based genetic biomarker prediction method via prompting techniques. Our work aims at addressing the following challenges: (1) extracting foreground instances related to genetic biomarkers from gigapixel WSIs, and (2) the interaction among the fine-grained pathological components in WSIs. Specifically, we leverage large language models to generate medical prompts that serve as prior knowledge in extracting instances associated with genetic biomarkers. We adopt a coarse-to-fine approach to mine biomarker information within the tumor microenvironment. This involves extracting instances related to genetic biomarkers using coarse medical prior knowledge, grouping pathology instances into fine-grained pathological components and mining their interactions. Experimental results on two colorectal cancer datasets show the superiority of our method, achieving 91.49% in AUC for MSI classification. The analysis further shows the clinical interpretability of our method. 
Code is publicly available at https:\/\/github.com\/DeepMed-Lab-ECNU\/PromptBio.", "title":"Prompting Whole Slide Image Based Genetic Biomarker Prediction", "authors":[ "Zhang, Ling", "Yun, Boxiang", "Xie, Xingran", "Li, Qingli", "Li, Xinxing", "Wang, Yan" ], "id":"Conference", "arxiv_id":"2407.09540", "GitHub":[ "https:\/\/github.com\/DeepMed-Lab-ECNU\/PromptBio" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":841 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/2402_paper.pdf", "bibtext":"@InProceedings{ Fra_SlicerTMS_MICCAI2024,\n author = { Franke, Loraine and Luo, Jie and Park, Tae Young and Kim, Nam Wook and Rathi, Yogesh and Pieper, Steve and Ning, Lipeng and Haehn, Daniel },\n title = { { SlicerTMS: Real-Time Visualization of Transcranial Magnetic Stimulation for Mental Health Treatment } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"We present a real-time visualization system for Transcranial Magnetic Stimulation (TMS), a non-invasive neuromodulation technique for treating various brain disorders and mental health diseases. Our solution targets the current challenges of slow and labor-intensive practices in treatment planning. Integrating Deep Learning (DL), our system rapidly predicts electric field (E-field) distributions in 0.2 seconds for precise and effective brain stimulation. The core advancement lies in our tool\u2019s real-time neuronavigation visualization capabilities, which support clinicians in making more informed decisions quickly and effectively. We assess our system\u2019s performance through three studies: First, a real-world use case scenario in a clinical setting, providing concrete feedback on applicability and usability in a practical environment. Second, a comparative analysis with another TMS tool focusing on computational efficiency across various hardware platforms. Lastly, we conducted an expert user study to measure usability and influence in optimizing TMS treatment planning. 
The system is openly available for community use and further development on GitHub: https:\/\/github.com\/lorifranke\/SlicerTMS.", "title":"SlicerTMS: Real-Time Visualization of Transcranial Magnetic Stimulation for Mental Health Treatment", "authors":[ "Franke, Loraine", "Luo, Jie", "Park, Tae Young", "Kim, Nam Wook", "Rathi, Yogesh", "Pieper, Steve", "Ning, Lipeng", "Haehn, Daniel" ], "id":"Conference", "arxiv_id":"2305.06459", "GitHub":[ "https:\/\/github.com\/lorifranke\/SlicerTMS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":842 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/4049_paper.pdf", "bibtext":"@InProceedings{ Ruf_MultiDataset_MICCAI2024,\n author = { Ruffini, Filippo and Tronchin, Lorenzo and Wu, Zhuoru and Chen, Wenting and Soda, Paolo and Shen, Linlin and Guarrasi, Valerio },\n title = { { Multi-Dataset Multi-Task Learning for COVID-19 Prognosis } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"In the fight against the COVID-19 pandemic, leveraging artificial intelligence to predict disease outcomes from chest radiographic images represents a significant scientific aim. The challenge, however, lies in the scarcity of large, labeled datasets with compatible tasks for training deep learning models without leading to overfitting. Addressing this issue, we introduce a novel multi-dataset multi-task training framework that predicts COVID-19 prognostic outcomes from chest X-rays (CXR) by integrating correlated datasets from disparate sources, distant from conventional multi-task learning approaches, which rely on datasets with multiple and correlated labeling schemes. Our framework hypothesizes that assessing severity scores enhances the model\u2019s ability to classify prognostic severity groups, thereby improving its robustness and predictive power. The proposed architecture comprises a deep convolutional network that receives inputs from two publicly available CXR datasets, AIforCOVID for severity prognostic prediction and BRIXIA for severity score assessment, and branches into task-specific fully connected output networks. Moreover, we propose a multi-task loss function, incorporating an indicator function, to exploit multi-dataset integration. The effectiveness and robustness of the proposed approach are demonstrated through significant performance improvements in prognosis classification tasks across 18 different convolutional neural network backbones in different evaluation strategies. 
This improvement is evident over single-task baselines and standard transfer learning strategies, supported by extensive statistical analysis, showing great application potential.", "title":"Multi-Dataset Multi-Task Learning for COVID-19 Prognosis", "authors":[ "Ruffini, Filippo", "Tronchin, Lorenzo", "Wu, Zhuoru", "Chen, Wenting", "Soda, Paolo", "Shen, Linlin", "Guarrasi, Valerio" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/cosbidev\/Multi-Dataset-Multi-Task-Learning-for-COVID-19-Prognosis" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":843 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3887_paper.pdf", "bibtext":"@InProceedings{ Yan_Deform3DGS_MICCAI2024,\n author = { Yang, Shuojue and Li, Qian and Shen, Daiyun and Gong, Bingchen and Dou, Qi and Jin, Yueming },\n title = { { Deform3DGS: Flexible Deformation for Fast Surgical Scene Reconstruction with Gaussian Splatting } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15006 },\n month = {October},\n pages = { pending },\n }", "abstract":"Tissue deformation poses a key challenge for accurate surgical scene reconstruction. Despite yielding high reconstruction quality, existing methods suffer from slow rendering speeds and long training times, limiting their intraoperative applicability. Motivated by recent progress in 3D Gaussian Splatting, an emerging technology in real-time 3D rendering, this work presents a novel fast reconstruction framework, termed Deform3DGS, for deformable tissues during endoscopic surgery. Specifically, we introduce 3D GS into surgical scenes by integrating a point cloud initialization to improve reconstruction. Furthermore, we propose a novel flexible deformation modeling scheme (FDM) to learn tissue deformation dynamics at the level of individual Gaussians. Our FDM can model the surface deformation with efficient representations, allowing for real-time rendering performance. More importantly, FDM significantly accelerates surgical scene reconstruction, demonstrating considerable clinical values, particularly in intraoperative settings where time efficiency is crucial. Experiments on DaVinci robotic surgery videos indicate the efficacy of our approach, showcasing superior reconstruction fidelity (PSNR: 37.90) and rendering speed (338.8 FPS) while substantially reducing training time to only 1 minute\/scene. 
Our code is available at https:\/\/github.com\/jinlab-imvr\/Deform3DGS.", "title":"Deform3DGS: Flexible Deformation for Fast Surgical Scene Reconstruction with Gaussian Splatting", "authors":[ "Yang, Shuojue", "Li, Qian", "Shen, Daiyun", "Gong, Bingchen", "Dou, Qi", "Jin, Yueming" ], "id":"Conference", "arxiv_id":"2405.17835", "GitHub":[ "https:\/\/github.com\/jinlab-imvr\/Deform3DGS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":844 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1370_paper.pdf", "bibtext":"@InProceedings{ Wu_MMFusion_MICCAI2024,\n author = { Wu, Chengyu and Wang, Chengkai and Zhou, Huiyu and Zhang, Yatao and Wang, Qifeng and Wang, Yaqi and Wang, Shuai },\n title = { { MMFusion: Multi-modality Diffusion Model for Lymph Node Metastasis Diagnosis in Esophageal Cancer } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15005 },\n month = {October},\n pages = { pending },\n }", "abstract":"Esophageal cancer is one of the most common types of cancer worldwide and ranks sixth in cancer-related mortality. Accurate computer-assisted diagnosis of cancer progression can help physicians effectively customize personalized treatment plans. Currently, CT-based cancer diagnosis methods have received much attention for their comprehensive ability to examine patients\u2019 conditions. However, multi-modal based methods may likely introduce information redundancy, leading to underperformance. In addition, efficient and effective interactions between multi-modal representations need to be further explored, lacking insightful exploration of prognostic correlation in multi-modality features. In this work, we introduce a multi-modal heterogeneous graph-based conditional feature-guided diffusion model for lymph node metastasis diagnosis based on CT images as well as clinical measurements and radiomics data. To explore the intricate relationships between multi-modal features, we construct a heterogeneous graph. Following this, a conditional feature-guided diffusion approach is applied to eliminate information redundancy. Moreover, we propose a masked relational representation learning strategy, aiming to uncover the latent prognostic correlations and priorities of primary tumor and lymph node image representations. 
Various experimental results validate the effectiveness of our proposed method.", "title":"MMFusion: Multi-modality Diffusion Model for Lymph Node Metastasis Diagnosis in Esophageal Cancer", "authors":[ "Wu, Chengyu", "Wang, Chengkai", "Zhou, Huiyu", "Zhang, Yatao", "Wang, Qifeng", "Wang, Yaqi", "Wang, Shuai" ], "id":"Conference", "arxiv_id":"2405.09539", "GitHub":[ "https:\/\/github.com\/wuchengyu123\/MMFusion" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":845 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3647_paper.pdf", "bibtext":"@InProceedings{ Lin_Zeroshot_MICCAI2024,\n author = { Lin, Xiyue and Du, Chenhe and Wu, Qing and Tian, Xuanyu and Yu, Jingyi and Zhang, Yuyao and Wei, Hongjiang },\n title = { { Zero-shot Low-field MRI Enhancement via Denoising Diffusion Driven Neural Representation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15007 },\n month = {October},\n pages = { pending },\n }", "abstract":"Recently, there have been significant advancements in the development of portable low-field (LF) magnetic resonance imaging (MRI) systems. These systems aim to provide low-cost, unshielded, and bedside diagnostic solutions. MRI experiences a diminished signal-to-noise ratio (SNR) at reduced field strengths, which results in severe signal deterioration and poor reconstruction. Therefore, reconstructing a high-field-equivalent image from a low-field MRI is a complex challenge due to the ill-posed nature of the task. In this paper, we introduce diffusion model driven neural representation. We decompose the low-field MRI enhancement problem into a data consistency subproblem and a prior subproblem and solve them in an iterative framework. The diffusion model provides high-quality high-field (HF) MR images prior, while the implicit neural representation ensures data consistency. 
Experimental results on simulated LF data and clinical LF data indicate that our proposed method is capable of achieving zero-shot LF MRI enhancement, showing potential for clinical applications.", "title":"Zero-shot Low-field MRI Enhancement via Denoising Diffusion Driven Neural Representation", "authors":[ "Lin, Xiyue", "Du, Chenhe", "Wu, Qing", "Tian, Xuanyu", "Yu, Jingyi", "Zhang, Yuyao", "Wei, Hongjiang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":846 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1311_paper.pdf", "bibtext":"@InProceedings{ Jai_MMBCD_MICCAI2024,\n author = { Jain, Kshitiz and Bansal, Aditya and Rangarajan, Krithika and Arora, Chetan },\n title = { { MMBCD: Multimodal Breast Cancer Detection from Mammograms with Clinical History } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Mammography serves as a vital tool for breast cancer detection, with screening and diagnostic modalities catering to distinct patient populations. However, in resource-constrained settings, screening mammography may not be feasible, necessitating reliance on diagnostic approaches. Recent advances in deep learning have shown promise in automated malignancy prediction, yet existing methodologies often overlook crucial clinical context inherent in diagnostic mammography. In this study, we propose a novel approach to integrate mammograms and clinical history to enhance breast cancer detection accuracy. To achieve our objective, we leverage recent advances in foundation models, using ViT for mammograms and RoBERTa for encoding text-based clinical history. Since current implementations of ViT cannot handle large 4Kx4K mammography scans, we devise a novel framework to first detect regions of interest and then classify using a multi-instance-learning strategy, while allowing text embedding from clinical history to attend to the visual regions of interest from the mammograms. Extensive experimentation demonstrates that our model, MMBCD, successfully incorporates contextual information while preserving image resolution and context, leading to superior results over existing methods, and showcasing its potential to significantly improve breast cancer screening practices. 
We report an (Accuracy, F1) of (0.96, 0.82) and (0.95, 0.68) for MMBCD on our two in-house test datasets, against (0.91, 0.41) and (0.87, 0.39) by Lava, and (0.84, 0.50) and (0.91, 0.27) by CLIP-ViT, both state-of-the-art multi-modal foundational models.", "title":"MMBCD: Multimodal Breast Cancer Detection from Mammograms with Clinical History", "authors":[ "Jain, Kshitiz", "Bansal, Aditya", "Rangarajan, Krithika", "Arora, Chetan" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":847 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0518_paper.pdf", "bibtext":"@InProceedings{ Zha_SegNeuron_MICCAI2024,\n author = { Zhang, Yanchao and Guo, Jinyue and Zhai, Hao and Liu, Jing and Han, Hua },\n title = { { SegNeuron: 3D Neuron Instance Segmentation in Any EM Volume with a Generalist Model } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15008 },\n month = {October},\n pages = { pending },\n }", "abstract":"Building a generalist model for neuron instance segmentation from electron microscopy (EM) volumes holds great potential to accelerate data processing and analysis in connectomics. However, the diversity in visual appearances and voxel resolutions presents obstacles to model development. Meanwhile, prompt-based foundation models for segmentation struggle to achieve satisfactory performance due to the inherent complexity and volumetric continuity of neuronal structures. To address this, this paper introduces SegNeuron, a generalist model for dense neuron instance segmentation with strong zero-shot generalizability. To this end, we first construct a multi-resolution, multi-modality, and multi-species volume EM database, named EMNeuron, consisting of over 22 billion voxels, with over 3 billion densely labeled. On this basis, we devise a novel workflow to build the model with customized strategies, including pretraining via multi-scale Gaussian mask reconstruction, domain-mixing finetuning, and foreground-restricted instance segmentation. Experimental results on unseen datasets indicate that SegNeuron not only significantly surpasses existing generalist models, but also achieves competitive or even superior results compared with specialist models. 
Datasets, codes, and models are available at https:\/\/github.com\/yanchaoz\/SegNeuron.", "title":"SegNeuron: 3D Neuron Instance Segmentation in Any EM Volume with a Generalist Model", "authors":[ "Zhang, Yanchao", "Guo, Jinyue", "Zhai, Hao", "Liu, Jing", "Han, Hua" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/yanchaoz\/SegNeuron" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":848 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0460_paper.pdf", "bibtext":"@InProceedings{ Wat_Hierarchical_MICCAI2024,\n author = { Watawana, Hasindri and Ranasinghe, Kanchana and Mahmood, Tariq and Naseer, Muzammal and Khan, Salman and Shahbaz Khan, Fahad },\n title = { { Hierarchical Text-to-Vision Self Supervised Alignment for Improved Histopathology Representation Learning } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15004 },\n month = {October},\n pages = { pending },\n }", "abstract":"Self-supervised representation learning has been highly promising for histopathology image analysis with numerous approaches leveraging their patient-slide-patch hierarchy to learn better representations. In this paper, we explore how the combination of domain specific natural language information with such hierarchical visual representations can benefit rich representation learning for medical image tasks. Building on automated language description generation for features visible in histopathology images, we present a novel language-tied self-supervised learning framework, Hierarchical Language-tied Self-Supervision (HLSS) for histopathology images. We explore contrastive objectives and granular language description based text alignment at multiple hierarchies to inject language modality information into the visual representations. Our resulting model achieves state-of-the-art performance on two medical imaging benchmarks, OpenSRH and TCGA datasets. Our framework also provides better interpretability with our language aligned representation space. 
The code is available at https:\/\/github.com\/Hasindri\/HLSS.", "title":"Hierarchical Text-to-Vision Self Supervised Alignment for Improved Histopathology Representation Learning", "authors":[ "Watawana, Hasindri", "Ranasinghe, Kanchana", "Mahmood, Tariq", "Naseer, Muzammal", "Khan, Salman", "Shahbaz Khan, Fahad" ], "id":"Conference", "arxiv_id":"2403.14616", "GitHub":[ "https:\/\/github.com\/Hasindri\/HLSS" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":849 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1951_paper.pdf", "bibtext":"@InProceedings{ Liu_GEM_MICCAI2024,\n author = { Liu, Shaonan and Chen, Wenting and Liu, Jie and Luo, Xiaoling and Shen, Linlin },\n title = { { GEM: Context-Aware Gaze EstiMation with Visual Search Behavior Matching for Chest Radiograph } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Gaze estimation is pivotal in human scene comprehension tasks, particularly in medical diagnostic analysis. Eye-tracking technology facilitates the recording of physicians\u2019 ocular movements during image interpretation, thereby elucidating their visual attention patterns and information-processing strategies. In this paper, we initially define the context-aware gaze estimation problem in medical radiology report settings. To understand the attention allocation and cognitive behavior of radiologists during the medical image interpretation process, we propose a context-aware Gaze Estimation (GEM) network that utilizes eye gaze data collected from radiologists to simulate their visual search behavior patterns throughout the image interpretation process. It consists of a context-awareness module, visual behavior graph construction, and visual behavior matching. Within the context-awareness module, we achieve intricate multimodal registration by establishing connections between medical reports and images. Subsequently, for a more accurate simulation of genuine visual search behavior patterns, we introduce a visual behavior graph structure, capturing such behavior through high-order relationships (edges) between gaze points (nodes). To maintain the authenticity of visual behavior, we devise a visual behavior-matching approach, adjusting the high-order relationships between them by matching the graph constructed from real and estimated gaze points. Extensive experiments on four publicly available datasets demonstrate the superiority of GEM over existing methods and its strong generalizability, which also provides a new direction for the effective utilization of diverse modalities in medical image interpretation and enhances the interpretability of models in the field of medical imaging. 
https:\/\/github.com\/Tiger-SN\/GEM", "title":"GEM: Context-Aware Gaze EstiMation with Visual Search Behavior Matching for Chest Radiograph", "authors":[ "Liu, Shaonan", "Chen, Wenting", "Liu, Jie", "Luo, Xiaoling", "Shen, Linlin" ], "id":"Conference", "arxiv_id":"2408.05502", "GitHub":[ "https:\/\/github.com\/Tiger-SN\/GEM" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":850 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/0751_paper.pdf", "bibtext":"@InProceedings{ San_Voxel_MICCAI2024,\n author = { Sanner, Antoine P. and Grauhan, Nils F. and Brockmann, Marc A. and Othman, Ahmed E. and Mukhopadhyay, Anirban },\n title = { { Voxel Scene Graph for Intracranial Hemorrhage } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15002 },\n month = {October},\n pages = { pending },\n }", "abstract":"Patients with Intracranial Hemorrhage (ICH) face a potentially life-threatening condition and patient-centered individualized treatment remains challenging due to possible clinical complications. Deep-Learning-based methods can efficiently analyze the routinely acquired head CTs to support clinical decision-making. The majority of early work focuses on the detection and segmentation of ICH, but does not model the complex relations between ICH and adjacent brain structures. \nIn this work, we design a tailored object detection method for ICH, which we unite with segmentation-grounded Scene Graph Generation (SGG) methods to learn a holistic representation of the clinical cerebral scene. To the best of our knowledge, this is the first application of SGG for 3D voxel images. We evaluate our method on two head-CT datasets and demonstrate that our model can recall up to 74% of clinically relevant relations. This work lays the foundation towards SGG for 3D voxel data. The generated Scene Graphs can already provide insights for the clinician, but are also valuable for all downstream tasks as a compact and interpretable representation.", "title":"Voxel Scene Graph for Intracranial Hemorrhage", "authors":[ "Sanner, Antoine P.", "Grauhan, Nils F.", "Brockmann, Marc A.", "Othman, Ahmed E.", "Mukhopadhyay, Anirban" ], "id":"Conference", "arxiv_id":"2407.21580", "GitHub":[ "https:\/\/github.com\/MECLabTUDA\/VoxelSceneGraph" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":851 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3431_paper.pdf", "bibtext":"@InProceedings{ Dur_Probabilistic_MICCAI2024,\n author = { Durso-Finley, Joshua and Barile, Berardino and Falet, Jean-Pierre and Arnold, Douglas L. 
and Pawlowski, Nick and Arbel, Tal },\n title = { { Probabilistic Temporal Prediction of Continuous Disease Trajectories and Treatment Effects Using Neural SDEs } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15003 },\n month = {October},\n pages = { pending },\n }", "abstract":"Personalized medicine based on medical images, including predicting future individualized clinical disease progression and treatment response, would have an enormous impact on healthcare and drug development, particularly for diseases (e.g. multiple sclerosis (MS)) with long term, complex, heterogeneous evolutions and no cure. In this work, we present the first stochastic causal temporal framework to model the continuous temporal evolution of disease progression via Neural Stochastic Differential Equations (NSDE). The proposed causal inference model takes as input the patient\u2019s high dimensional images (MRI) and tabular data, and predicts both factual and counterfactual progression trajectories on different treatments in latent space. The NSDE permits the estimation of high-confidence personalized trajectories and treatment effects. Extensive experiments were performed on a large, multi-centre, proprietary dataset of patient 3D MRI and clinical data acquired during several randomized clinical trials for MS treatments. Our results present the first successful uncertainty-based causal Deep Learning (DL) model to: (a) accurately predict future patient MS disability evolution (e.g. EDSS) and treatment effects leveraging baseline MRI, and (b) permit the discovery of subgroups of patients for which the model has high confidence in their response to treatment even in clinical trials which did not reach their clinical endpoints.", "title":"Probabilistic Temporal Prediction of Continuous Disease Trajectories and Treatment Effects Using Neural SDEs", "authors":[ "Durso-Finley, Joshua", "Barile, Berardino", "Falet, Jean-Pierre", "Arnold, Douglas L.", "Pawlowski, Nick", "Arbel, Tal" ], "id":"Conference", "arxiv_id":"2406.12807", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":852 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1228_paper.pdf", "bibtext":"@InProceedings{ Su_Crossgraph_MICCAI2024,\n author = { Su, Huaqiang and Lei, Haijun and Guoliang, Chen and Lei, Baiying },\n title = { { Cross-graph Interaction and Diffusion Probability Models for Lung Nodule Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Accurate segmentation of lung nodules in computed tomography (CT) images is crucial to advance the treatment of lung cancer. Methods based on diffusion probabilistic models (DPMs) are widely used in medical image segmentation tasks. Nevertheless, conventional DPM encounters challenges when addressing medical image segmentation issues, primarily attributed to the irregular structure of lung nodules and the inherent resemblance between lung nodules and their surrounding environments. 
Consequently, this study introduces an innovative architecture known as the dual-branch Diff-UNet to address the challenges associated with lung nodule segmentation effectively. Specifically, the denoising UNet in this architecture interactively processes the semantic information captured by the branches of the Transformer and the convolutional neural network (CNN) through bidirectional connection units. Furthermore, the feature fusion module (FFM) helps integrate the semantic features extracted by DPM with the locally detailed features captured by the segmentation network. Simultaneously, a lightweight cross-graph interaction (CGI) module is introduced in the decoder, which uses region and edge features as graph nodes to update and propagate cross-domain features and capture the characteristics of object boundaries. Finally, the multi-scale cross module (MCM) synergizes the deep features from the DPM with the edge features from the segmentation network, augmenting the network\u2019s capability to comprehend images. The Diff-UNet has been proven effective through experiments on challenging datasets, including self-collected datasets and LUNA16.", "title":"Cross-graph Interaction and Diffusion Probability Models for Lung Nodule Segmentation", "authors":[ "Su, Huaqiang", "Lei, Haijun", "Guoliang, Chen", "Lei, Baiying" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":853 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/3978_paper.pdf", "bibtext":"@InProceedings{ Tan_VertFound_MICCAI2024,\n author = { Tang, Jinzhou and Wu, Yinhao and Yao, Zequan and Li, Mingjie and Hong, Yuan and Yu, Dongdong and Gao, Zhifan and Chen, Bin and Zhao, Shen },\n title = { { VertFound: Synergizing Semantic and Spatial Understanding for Fine-grained Vertebrae Classification via Foundation Models } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15012 },\n month = {October},\n pages = { pending },\n }", "abstract":"Achieving automated vertebrae classification in spine images is a crucial yet challenging task due to the repetitive nature of adjacent vertebrae and limited fields of view (FoV). Different from previous methods that leverage the serial information of vertebrae to optimize classification results, we propose VertFound, a framework that harnesses the inherent adaptability and versatility of foundation models for fine-grained vertebrae classification. Specifically, VertFound designs a vertebral positioning with cross-model synergy (VPS) module that efficiently merges semantic information from CLIP and spatial features from SAM, leading to richer feature representations that capture vertebral spatial relationships. Moreover, a novel Wasserstein loss is designed to minimize disparities between image and text feature distributions by continuously optimizing the transport distance between the two distributions, resulting in a more discriminative alignment capability of CLIP for vertebral classification. 
\nExtensive evaluations on our vertebral MRI dataset show VertFound exhibits significant improvements in both identification rate (IDR) and identification accuracy (IRA), which underscores its efficacy and further shows the remarkable potential of foundation models for fine-grained recognition tasks in the medical domain. Our code is available at https:\/\/github.com\/inhaowu\/VertFound.", "title":"VertFound: Synergizing Semantic and Spatial Understanding for Fine-grained Vertebrae Classification via Foundation Models", "authors":[ "Tang, Jinzhou", "Wu, Yinhao", "Yao, Zequan", "Li, Mingjie", "Hong, Yuan", "Yu, Dongdong", "Gao, Zhifan", "Chen, Bin", "Zhao, Shen" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "https:\/\/github.com\/inhaowu\/VertFound" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Poster", "unique_id":854 }, { "bibtex_url":null, "proceedings":"https:\/\/papers.miccai.org\/miccai-2024\/paper\/1638_paper.pdf", "bibtext":"@InProceedings{ Che_HybridStructureOriented_MICCAI2024,\n author = { Chen, Lingyu and Wang, Yue and Zhao, Zhe and Liao, Hongen and Zhang, Daoqiang and Han, Haojie and Chen, Fang },\n title = { { Hybrid-Structure-Oriented Transformer for Arm Musculoskeletal Ultrasound Segmentation } },\n booktitle = {Medical Image Computing and Computer Assisted Intervention -- MICCAI 2024},\n year = {2024},\n publisher = {Springer Nature Switzerland},\n volume = { LNCS 15001 },\n month = {October},\n pages = { pending },\n }", "abstract":"Segmenting complex layer structures, including subcutaneous fat, skeletal muscle, and bone in arm musculoskeletal ultrasound (MSKUS), is vital for diagnosing and monitoring the progression of Breast-Cancer-Related Lymphedema (BCRL). Nevertheless, previous researches primarily focus on individual muscle or bone segmentation in MSKUS, overlooking the intricate and hybrid-layer morphology that characterizes these structures. To address this limitation, we propose a novel approach called the hybrid structure-oriented Transformer (HSformer), which effectively captures hierarchical structures with diverse morphology in MSKUS. Specifically, HSformer combines a hierarchical-consistency relative position encoding and a structure-biased constraint for hierarchical structure attention. Our experiments on arm MSKUS datasets demonstrate that HSformer achieves state-of-the-art performance in segmenting subcutaneous fat, skeletal muscle and bone.", "title":"Hybrid-Structure-Oriented Transformer for Arm Musculoskeletal Ultrasound Segmentation", "authors":[ "Chen, Lingyu", "Wang, Yue", "Zhao, Zhe", "Liao, Hongen", "Zhang, Daoqiang", "Han, Haojie", "Chen, Fang" ], "id":"Conference", "arxiv_id":"", "GitHub":[ "" ], "paper_page":"", "n_linked_authors":-1, "upvotes":-1, "num_comments":-1, "n_authors":-1, "Models":[ ], "Datasets":[ ], "Spaces":[ ], "old_Models":[ ], "old_Datasets":[ ], "old_Spaces":[ ], "paper_page_exists_pre_conf":0, "type":"Oral", "unique_id":855 } ]