Nouboukpo, A.; Allaoui, M. L.; Allili, M. S.
Multi-scale spatial consistency for deep semi-supervised skin lesion segmentation Journal article
In: Engineering Applications of Artificial Intelligence, vol. 135, 2024, ISSN: 0952-1976 (Publisher: Elsevier Ltd).
Abstract | Links | BibTeX | Tags: Deep learning, Dermatology, Image segmentation, Lesion segmentations, Medical imaging, Multi-scales, Semi-supervised, Semi-supervised learning, Skin lesion, Skin lesion segmentation, Spatial consistency, Spatially constrained mixture model, Spatially-constrained mixture models, Supervised learning, UNets, Unlabeled data
@article{nouboukpo_multi-scale_2024,
title = {Multi-scale spatial consistency for deep semi-supervised skin lesion segmentation},
author = {A. Nouboukpo and M. L. Allaoui and M. S. Allili},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85195700182&doi=10.1016%2fj.engappai.2024.108681&partnerID=40&md5=e1cc2b6a1bb0aed530e8c04583c76167},
doi = {10.1016/j.engappai.2024.108681},
issn = {0952-1976},
year = {2024},
date = {2024-01-01},
journal = {Engineering Applications of Artificial Intelligence},
volume = {135},
abstract = {This paper introduces a novel semi-supervised framework, the Multiscale Spatial Consistency Network (MSCNet), for robust semi-supervised skin lesion segmentation. MSCNet uses local and global spatial consistency to leverage a minimal set of labeled data, supplemented by a large amount of unlabeled data, to improve segmentation. The model is based on a single Encoder–Decoder (ED) network, augmented with a Spatially-Constrained Mixture Model (SCMM) to enforce spatial coherence in predictions. To encode the local spatial consistency, a hierarchical superpixel structure is used to capture local region context (LRC), bolstering the model's capacity to discern fine-grained lesion details. Global consistency is enforced through the SCMM module, which uses a larger context for lesion/background discrimination. In addition, it enables efficient leveraging of the unlabeled data through pseudo-label generation. Experiments demonstrate that the MSCNet outperforms existing state-of-the-art methods in segmenting complex lesions. The MSCNet has an excellent generalization capability, offering a promising direction for semi-supervised medical image segmentation, particularly in scenarios with limited annotated data. The code is available at https://github.com/AdamaTG/MSCNet. © 2024 Elsevier Ltd},
note = {Publisher: Elsevier Ltd},
keywords = {Deep learning, Dermatology, Image segmentation, Lesion segmentations, Medical imaging, Multi-scales, Semi-supervised, Semi-supervised learning, Skin lesion, Skin lesion segmentation, Spatial consistency, Spatially constrained mixture model, Spatially-constrained mixture models, Supervised learning, UNets, Unlabeled data},
pubstate = {published},
tppubtype = {article}
}
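
A minimal, hypothetical PyTorch sketch of the multi-scale consistency idea described in the abstract above: a supervised loss on a small labeled batch is combined with a consistency loss that pushes predictions on rescaled unlabeled images toward the detached full-resolution predictions, which act as soft pseudo-labels. It does not reproduce MSCNet's SCMM or hierarchical superpixel modules; all class and function names below are illustrative, and the authors' actual code is at https://github.com/AdamaTG/MSCNet.

# Illustrative sketch only: generic multi-scale consistency for
# semi-supervised segmentation, not the authors' MSCNet implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyEncoderDecoder(nn.Module):
    """Minimal encoder-decoder standing in for the segmentation backbone."""
    def __init__(self, in_ch=3, num_classes=2):
        super().__init__()
        self.enc = nn.Sequential(
            nn.Conv2d(in_ch, 16, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(),
        )
        self.dec = nn.Sequential(
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(32, num_classes, 3, padding=1),
        )

    def forward(self, x):
        return self.dec(self.enc(x))  # logits at input resolution

def semi_supervised_step(model, x_lab, y_lab, x_unlab, scales=(0.5, 0.25), lam=0.1):
    """One training step: supervised loss on labeled data plus a
    multi-scale consistency loss on unlabeled data (pseudo-label style)."""
    # Supervised term on the small labeled set.
    logits_lab = model(x_lab)
    loss_sup = F.cross_entropy(logits_lab, y_lab)

    # Consistency term: predictions on rescaled unlabeled inputs should agree
    # with the detached full-resolution predictions once resized back.
    with torch.no_grad():
        pseudo = model(x_unlab).softmax(dim=1)  # soft pseudo-label
    loss_cons = 0.0
    for s in scales:
        x_small = F.interpolate(x_unlab, scale_factor=s, mode="bilinear",
                                align_corners=False)
        logits_small = model(x_small)
        logits_up = F.interpolate(logits_small, size=x_unlab.shape[-2:],
                                  mode="bilinear", align_corners=False)
        loss_cons = loss_cons + F.kl_div(logits_up.log_softmax(dim=1),
                                         pseudo, reduction="batchmean")
    return loss_sup + lam * loss_cons / len(scales)

if __name__ == "__main__":
    model = TinyEncoderDecoder()
    x_lab = torch.randn(2, 3, 64, 64)
    y_lab = torch.randint(0, 2, (2, 64, 64))
    x_unlab = torch.randn(4, 3, 64, 64)
    loss = semi_supervised_step(model, x_lab, y_lab, x_unlab)
    loss.backward()
    print(float(loss))

Because the pseudo-labels are detached, only the rescaled branches receive gradients from the consistency term, which is a common design choice in consistency-based semi-supervised training.
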
Messaoudi, H.; Belaid, A.; Allaoui, M. L.; Zetout, A.; Allili, M. S.; Tliba, S.; Salem, D. Ben; Conze, P. -H.
Efficient Embedding Network for 3D Brain Tumor Segmentation Journal article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 12658 LNCS, pp. 252–262, 2021, ISSN: 0302-9743 (ISBN: 9783030720834, Publisher: Springer Science and Business Media Deutschland GmbH).
Abstract | Links | BibTeX | Tags: 3D medical image processing, Brain, Brain tumor segmentation, Classification networks, Convolutional neural networks, Deep learning, Embedding network, Image segmentation, Large dataset, Large datasets, Medical imaging, Natural images, Net networks, Semantic segmentation, Semantics, Signal encoding, Tumors
@article{messaoudi_efficient_2021,
title = {Efficient Embedding Network for 3D Brain Tumor Segmentation},
author = {H. Messaoudi and A. Belaid and M. L. Allaoui and A. Zetout and M. S. Allili and S. Tliba and D. Ben Salem and P. -H. Conze},
editor = {S. Bakas and A. Crimi},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85107387134&doi=10.1007%2f978-3-030-72084-1_23&partnerID=40&md5=b3aa3516b0465a1bf5611db4727d95f1},
doi = {10.1007/978-3-030-72084-1_23},
issn = {0302-9743},
year = {2021},
date = {2021-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {12658 LNCS},
pages = {252–262},
abstract = {3D medical image processing with deep learning greatly suffers from a lack of data. Thus, studies carried out in this field are limited compared to works related to 2D natural image analysis, where very large datasets exist. As a result, powerful and efficient 2D convolutional neural networks have been developed and trained. In this paper, we investigate a way to transfer the performance of a two-dimensional classification network for the purpose of three-dimensional semantic segmentation of brain tumors. We propose an asymmetric U-Net network by incorporating the EfficientNet model as part of the encoding branch. As the input data is in 3D, the first layers of the encoder are devoted to the reduction of the third dimension in order to fit the input of the EfficientNet network. Experimental results on validation and test data from the BraTS 2020 challenge demonstrate that the proposed method achieves promising performance. © 2021, Springer Nature Switzerland AG.},
note = {ISBN: 9783030720834
Publisher: Springer Science and Business Media Deutschland GmbH},
keywords = {3D medical image processing, Brain, Brain tumor segmentation, Classification networks, Convolutional neural networks, Deep learning, Embedding network, Image segmentation, Large dataset, Large datasets, Medical imaging, Natural images, Net networks, Semantic segmentation, Semantics, Signal encoding, Tumors},
pubstate = {published},
tppubtype = {article}
}
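
A minimal sketch of the dimensionality-reduction idea from the abstract above, assuming PyTorch: a few strided 3D convolutions collapse the depth axis of a volumetric input so that a 2D classification backbone can serve as the encoder. The 2D encoder below is a stand-in (in practice one would plug in an EfficientNet, e.g. torchvision.models.efficientnet_b0(...).features); shapes, layer sizes, and names are illustrative, not the authors' implementation.

# Illustrative sketch only: feeding a 3D volume to a 2D encoder by
# collapsing the depth axis with initial 3D convolutions.
import torch
import torch.nn as nn

class DepthReduction(nn.Module):
    """Collapse the D axis of a (B, C, D, H, W) volume into channels."""
    def __init__(self, in_ch=4, depth=8, out_ch=3):
        super().__init__()
        # Strided 3D convolutions progressively shrink the depth dimension.
        self.reduce = nn.Sequential(
            nn.Conv3d(in_ch, 8, kernel_size=3, stride=(2, 1, 1), padding=1),
            nn.ReLU(),
            nn.Conv3d(8, 8, kernel_size=3, stride=(2, 1, 1), padding=1),
            nn.ReLU(),
        )
        # After two stride-2 steps along D, depth // 4 slices remain; fuse
        # them (and their channels) into a 3-channel 2D feature map.
        self.to_2d = nn.Conv2d(8 * (depth // 4), out_ch, kernel_size=1)

    def forward(self, x):                      # x: (B, C, D, H, W)
        x = self.reduce(x)                     # (B, 8, D//4, H, W)
        b, c, d, h, w = x.shape
        x = x.reshape(b, c * d, h, w)          # flatten depth into channels
        return self.to_2d(x)                   # (B, 3, H, W), 2D-backbone ready

class Asymmetric2DEncoderSeg(nn.Module):
    """3D-in, 2D-encoder segmentation sketch (decoder kept deliberately tiny)."""
    def __init__(self, num_classes=3):
        super().__init__()
        self.depth_reduction = DepthReduction()
        # Stand-in 2D encoder; a real model would use an EfficientNet here.
        self.encoder2d = nn.Sequential(
            nn.Conv2d(3, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),
        )
        self.decoder = nn.Sequential(
            nn.Upsample(scale_factor=4, mode="bilinear", align_corners=False),
            nn.Conv2d(64, num_classes, 3, padding=1),
        )

    def forward(self, volume):                 # volume: (B, 4, D, H, W)
        planar = self.depth_reduction(volume)
        feats = self.encoder2d(planar)
        return self.decoder(feats)             # per-pixel class logits

if __name__ == "__main__":
    # Four modalities, 8 slices, 128x128 in-plane: shapes are illustrative only.
    vol = torch.randn(1, 4, 8, 128, 128)
    logits = Asymmetric2DEncoderSeg()(vol)
    print(logits.shape)  # torch.Size([1, 3, 128, 128])
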