

de Recherche et d’Innovation
en Cybersécurité et Société
Allaoui, M. L.; Allili, M. S.; Belaid, A.
HA-U3Net: A modality-agnostic framework for 3D medical image segmentation using nested V-Net structure and hybrid attention Journal Article
In: Knowledge-Based Systems, vol. 327, 2025, ISSN: 09507051 (ISSN).
Abstract | Links | BibTeX | Tags: 3D medical image, 3D medical image segmentation, Diagnosis, Diagnosis planning, Disease diagnosis, Disease treatment, Generalization capability, Image segmentation, Magnetic resonance imaging, Medical image processing, Medical image segmentation, Nested volume-structure, Net structures, Self hybrid attention, Structures (built objects)
@article{allaoui_ha-u3net_2025,
  title    = {{HA-U3Net}: A modality-agnostic framework for {3D} medical image segmentation using nested {V-Net} structure and hybrid attention},
  author   = {Allaoui, M. L. and Allili, M. S. and Belaid, A.},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011370963&doi=10.1016%2Fj.knosys.2025.114127&partnerID=40&md5=d98a109f015445adb3001bb4017bf953},
  doi      = {10.1016/j.knosys.2025.114127},
  issn     = {0950-7051},
  year     = {2025},
  date     = {2025-01-01},
  journal  = {Knowledge-Based Systems},
  volume   = {327},
  abstract = {3D medical image segmentation is essential for disease diagnosis and treatment planning across a wide range of imaging modalities (e.g., MRI, CT, ultrasound, and PET). However, modality-specific challenges, such as noise, artifacts, low contrast, and anatomical variability, along with the presence of small lesions and fuzzy boundaries, hinder the generalization capability of existing segmentation models. In this work, we present HA-U3Net, a novel 3D U-Net-based model designed to address these limitations through a stepwise approach. First, we introduce a deeply nested U3-shaped structure built upon 3D V-Net modules, enabling multi-scale hierarchical representation learning. Second, we integrate a hybrid attention mechanism combining spatial and channel-wise attention to enhance salient features extraction and the delineation of small or poorly defined structures. Third, we demonstrate the cross-modality generalization capabilities of HA-U3Net through extensive evaluations on several datasets, where our model consistently outperforms baseline methods. Finally, we propose a lightweight variant, U3Mamba, reducing computational complexity while maintaining high performance. © 2025 Elsevier B.V.},
  keywords = {3D medical image, 3D medical image segmentation, Diagnosis, Diagnosis planning, Disease diagnosis, Disease treatment, Generalization capability, Image segmentation, Magnetic resonance imaging, Medical image processing, Medical image segmentation, Nested volume-structure, Net structures, Self hybrid attention, Structures (built objects)},
  pubstate = {published},
  tppubtype = {article}
}
Amirkhani, D.; Allili, M. S.; Lapointe, J. -F.
CrackSight: An Efficient Crack Segmentation Model in Varying Acquisition Ranges and Complex Backgrounds Journal Article
In: IEEE Transactions on Automation Science and Engineering, vol. 22, pp. 19197–19214, 2025, ISSN: 15455955 (ISSN).
Abstract | Links | BibTeX | Tags: Attention mechanisms, Codes (symbols), Complex background, complex backgrounds, Crack detection, Crack propagation, Crack segmentation, Crack segmentations, Detection features, End to end, Feature extraction, Features extraction, Global context, Image segmentation, Learning models, Learning systems, Segmentation models, Transportation infrastructures
@article{amirkhani_cracksight_2025,
  title    = {{CrackSight}: An Efficient Crack Segmentation Model in Varying Acquisition Ranges and Complex Backgrounds},
  author   = {Amirkhani, D. and Allili, M. S. and Lapointe, J.-F.},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011756992&doi=10.1109%2FTASE.2025.3591407&partnerID=40&md5=d908b79e863a4725d10bec325b761f34},
  doi      = {10.1109/TASE.2025.3591407},
  issn     = {1545-5955},
  year     = {2025},
  date     = {2025-01-01},
  journal  = {IEEE Transactions on Automation Science and Engineering},
  volume   = {22},
  pages    = {19197--19214},
  abstract = {Accurate crack segmentation in concrete transportation infrastructures is critical for ensuring structural integrity and facilitating timely maintenance interventions. This paper presents CrackSight, an end-to-end deep learning model for precise crack segmentation across varying observational ranges and extremely complex backgrounds. CrackSight seamlessly integrates crack detection and segmentation through two branches. The Detection Feature Extraction Branch (DFEB) provides global context for crack localization in complex backgrounds or at far observation ranges. It guides the segmentation model to focus on regions with the highest crack-prone potential. The segmentation branch leverages the fusion of multi-scale feature maps using dilated convolutions, allowing to capture subtle and complex crack patterns. The branch also incorporates the Dual-Attention Linear Focus Mechanism (DALFM) enhancing crack segmentation through saliency-driven improvements. Finally, CrackSight uses a novel hybrid contextual loss, which dynamically compensates for class imbalance and enhance crack discrimination against complex backgrounds. Our model is also lightweight and can be run in resource-constrained environments, making it suitable for real-world inspection using mobile platforms. Our results demonstrate that it significantly improves segmentation accuracy, setting a new benchmark for crack segmentation. The dataset and additional resources are available on GitHub. Note to Practitioners—CrackSight is a dual-branch deep learning framework designed for accurate and efficient segmentation of concrete cracks under challenging real-world conditions. By combining a detection-guided localization branch with a context-aware segmentation, CrackSight offers enhanced robustness to noise, background clutter, and varying acquisition distances, common challenges in UAV-based infrastructure inspections. Its architecture integrates multi-scale feature fusion and adaptive contextual guidance, enabling reliable detection of both fine and fragmented cracks. With its lightweight design and fast inference time, CrackSight offers practitioners a practical and scalable solution for automating visual inspection tasks, reducing manual effort, and improving safety in structural health monitoring workflows. © 2025 IEEE.},
  keywords = {Attention mechanisms, Codes (symbols), Complex background, complex backgrounds, Crack detection, Crack propagation, Crack segmentation, Crack segmentations, Detection features, End to end, Feature extraction, Features extraction, Global context, Image segmentation, Learning models, Learning systems, Segmentation models, Transportation infrastructures},
  pubstate = {published},
  tppubtype = {article}
}
Allaoui, M. L.; Allili, M. S.
MixLVMM: A Mixture of Lightweight Vision Mamba Model for Enhancing Skin Lesion Segmentation Across High Tone Variability Journal Article
In: IEEE Access, vol. 13, pp. 121234–121249, 2025, ISSN: 21693536 (ISSN).
Abstract | Links | BibTeX | Tags: Attention mechanism, Attention mechanisms, Computational efficiency, Critical challenges, Dermatology, Diagnosis, Image segmentation, Lesion segmentations, Lung cancer, Mixture of experts model, Mixture-of-experts model, Segmentation performance, Skin lesion, Skin lesion segmentation, Skin/lesion tone variability, Vision mamba
@article{allaoui_mixlvmm_2025,
  title    = {{MixLVMM}: A Mixture of Lightweight {Vision Mamba} Model for Enhancing Skin Lesion Segmentation Across High Tone Variability},
  author   = {Allaoui, M. L. and Allili, M. S.},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105012036322&doi=10.1109%2FACCESS.2025.3588476&partnerID=40&md5=1cf51dcf43653e1677ad36a1360392ac},
  doi      = {10.1109/ACCESS.2025.3588476},
  issn     = {2169-3536},
  year     = {2025},
  date     = {2025-01-01},
  journal  = {IEEE Access},
  volume   = {13},
  pages    = {121234--121249},
  abstract = {Accurate skin lesion segmentation remains a critical challenge in automated dermatological diagnosis due to heterogeneous lesion presentations, ambiguous boundaries, imaging artifacts, and significant variability in skin and lesion tones across diverse populations. Current segmentation methods inadequately address these multifaceted complexities, particularly failing to handle extreme tone variations that can lead to diagnostic bias. To address these limitations, we present the Mixture of Lightweight Vision Mamba Model (MixLVMM), a novel expert-based framework that enhances segmentation performance across high tone variability through specialized processing. Our approach employs a Siamese network with triplet loss as a gate mechanism to categorize lesions based on tonal characteristics, routing each image to specialized Vision Mamba Model (VMM) experts optimized for specific lesion categories. Each expert utilizes a U-shaped architecture incorporating Focused Vision Mamba blocks and Adaptive Salient Region Attention modules to capture lesion-specific features while maintaining computational efficiency. Comprehensive evaluation on ISIC and PH2 datasets demonstrates that MixLVMM achieves superior segmentation performance with an average Dice coefficient of 93%, surpassing state-of-the-art methods while maintaining efficiency with only 2.5M parameters. These results establish MixLVMM as a robust solution for addressing tone-related segmentation challenges in clinical dermatology, offering both high accuracy and practical deployment feasibility for real-world applications. © 2013 IEEE.},
  keywords = {Attention mechanism, Attention mechanisms, Computational efficiency, Critical challenges, Dermatology, Diagnosis, Image segmentation, Lesion segmentations, Lung cancer, Mixture of experts model, Mixture-of-experts model, Segmentation performance, Skin lesion, Skin lesion segmentation, Skin/lesion tone variability, Vision mamba},
  pubstate = {published},
  tppubtype = {article}
}
Allaoui, M. L.; Allili, M. S.
MEDiXNet: A Robust Mixture of Expert Dermatological Imaging Networks for Skin Lesion Segmentation Proceedings Article
In: IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recogn., IEEE Computer Society, 2024, ISBN: 19457928 (ISSN); 979-835031333-8 (ISBN), (Journal Abbreviation: IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recogn.).
Abstract | Links | BibTeX | Tags: Attention mechanism, Attention mechanisms, Blurred boundaries, Cancer detection, Deep learning, Dermatology, Expert systems, Image segmentation, Lesion segmentations, Mixture of experts, Mixture of experts model, Mixture-of-experts model, Salient regions, Skin cancers, Skin lesion, Skin lesion segmentation
@inproceedings{allaoui_medixnet_2024,
  title     = {{MEDiXNet}: A Robust Mixture of Expert Dermatological Imaging Networks for Skin Lesion Segmentation},
  author    = {Allaoui, M. L. and Allili, M. S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85203397643&doi=10.1109%2fISBI56570.2024.10635430&partnerID=40&md5=c95dd2122f03c944e945b684a111e741},
  doi       = {10.1109/ISBI56570.2024.10635430},
  issn      = {1945-7928},
  isbn      = {979-8-3503-1333-8},
  year      = {2024},
  date      = {2024-01-01},
  booktitle = {Proceedings of the IEEE International Symposium on Biomedical Imaging ({ISBI})},
  publisher = {IEEE Computer Society},
  abstract  = {Skin lesion segmentation in dermatological imaging is crucial for early skin cancer detection. However, it is challenging due to variation in lesion appearance, blurred boundaries, and the presence of artifacts. Existing segmentation methods often fall short in accurately addressing these issues. We present MEDiXNet, a novel deep learning model combining expert networks with the Adaptive Salient Region Attention Module (ASRAM) to specifically tackle these challenges. Tailored for varying lesion types, MEDiXNet leverages ASRAM to enhance focus on critical regions, substantially improving segmentation accuracy. Tested on the ISIC datasets, it achieved a 94% Dice coefficient, surpassing state-of-the-art methods. MEDiXNet's innovative approach represents a significant advancement in dermatological imaging, promising to elevate the precision of skin cancer diagnostics. © 2024 IEEE.},
  keywords  = {Attention mechanism, Attention mechanisms, Blurred boundaries, Cancer detection, Deep learning, Dermatology, Expert systems, Image segmentation, Lesion segmentations, Mixture of experts, Mixture of experts model, Mixture-of-experts model, Salient regions, Skin cancers, Skin lesion, Skin lesion segmentation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nouboukpo, A.; Allaoui, M. L.; Allili, M. S.
Multi-scale spatial consistency for deep semi-supervised skin lesion segmentation Journal Article
In: Engineering Applications of Artificial Intelligence, vol. 135, 2024, ISSN: 09521976 (ISSN), (Publisher: Elsevier Ltd).
Abstract | Links | BibTeX | Tags: Deep learning, Dermatology, Image segmentation, Lesion segmentations, Medical imaging, Multi-scales, Semi-supervised, Semi-supervised learning, Skin lesion, Skin lesion segmentation, Spatial consistency, Spatially constrained mixture model, Spatially-constrained mixture models, Supervised learning, UNets, Unlabeled data
@article{nouboukpo_multi-scale_2024,
  title     = {Multi-scale spatial consistency for deep semi-supervised skin lesion segmentation},
  author    = {Nouboukpo, A. and Allaoui, M. L. and Allili, M. S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85195700182&doi=10.1016%2fj.engappai.2024.108681&partnerID=40&md5=e1cc2b6a1bb0aed530e8c04583c76167},
  doi       = {10.1016/j.engappai.2024.108681},
  issn      = {0952-1976},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {Engineering Applications of Artificial Intelligence},
  volume    = {135},
  publisher = {Elsevier Ltd},
  abstract  = {This paper introduces a novel semi-supervised framework, the Multiscale Spatial Consistency Network (MSCNet), for robust semi-supervised skin lesion segmentation. MSCNet uses local and global spatial consistency to leverage a minimal set of labeled data, supplemented by a large number of unlabeled data, to improve segmentation. The model is is based on a single Encoder–Decoder (ED) network, augmented with a Spatially-Constrained Mixture Model (SCMM) to enforce spatial coherence in predictions. To encode the local spatial consistency, a hierarchical superpixel structure is used capture local region context (LRC), bolstering the model capacity to discern fine-grained lesion details. Global consistency is enforced through the SCMM module, which uses a larger context for lesion/background discrimination. In addition, it enables efficient leveraging of the unlabeled data through pseudo-label generation. Experiments demonstrate that the MSCNet outperforms existing state-of-the-art methods in segmenting complex lesions. The MSCNet has an excellent generalization capability, offering a promising direction for semi-supervised medical image segmentation, particularly in scenarios with limited annotated data. The code is available at https://github.com/AdamaTG/MSCNet. © 2024 Elsevier Ltd},
  keywords  = {Deep learning, Dermatology, Image segmentation, Lesion segmentations, Medical imaging, Multi-scales, Semi-supervised, Semi-supervised learning, Skin lesion, Skin lesion segmentation, Spatial consistency, Spatially constrained mixture model, Spatially-constrained mixture models, Supervised learning, UNets, Unlabeled data},
  pubstate  = {published},
  tppubtype = {article}
}
Messaoudi, H.; Belaid, A.; Allaoui, M. L.; Zetout, A.; Allili, M. S.; Tliba, S.; Salem, D. Ben; Conze, P. -H.
Efficient Embedding Network for 3D Brain Tumor Segmentation Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 12658 LNCS, pp. 252–262, 2021, ISSN: 03029743, (ISBN: 9783030720834).
Abstract | Links | BibTeX | Tags: 3D medical image processing, Brain, Brain tumor segmentation, Classification networks, Convolutional neural networks, Deep learning, Embedding network, Image segmentation, Large dataset, Large datasets, Medical imaging, Natural images, Net networks, Semantic segmentation, Semantics, Signal encoding, Tumors
@article{messaoudi_efficient_2021,
  title     = {Efficient Embedding Network for {3D} Brain Tumor Segmentation},
  author    = {Messaoudi, H. and Belaid, A. and Allaoui, M. L. and Zetout, A. and Allili, M. S. and Tliba, S. and Ben Salem, D. and Conze, P.-H.},
  editor    = {Bakas, S. and Crimi, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85107387134&doi=10.1007%2f978-3-030-72084-1_23&partnerID=40&md5=b3aa3516b0465a1bf5611db4727d95f1},
  doi       = {10.1007/978-3-030-72084-1_23},
  issn      = {0302-9743},
  isbn      = {978-3-030-72083-4},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume    = {12658 LNCS},
  pages     = {252--262},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {3D medical image processing with deep learning greatly suffers from a lack of data. Thus, studies carried out in this field are limited compared to works related to 2D natural image analysis, where very large datasets exist. As a result, powerful and efficient 2D convolutional neural networks have been developed and trained. In this paper, we investigate a way to transfer the performance of a two-dimensional classification network for the purpose of three-dimensional semantic segmentation of brain tumors. We propose an asymmetric U-Net network by incorporating the EfficientNet model as part of the encoding branch. As the input data is in 3D, the first layers of the encoder are devoted to the reduction of the third dimension in order to fit the input of the EfficientNet network. Experimental results on validation and test data from the BraTS 2020 challenge demonstrate that the proposed method achieve promising performance. © 2021, Springer Nature Switzerland AG.},
  keywords  = {3D medical image processing, Brain, Brain tumor segmentation, Classification networks, Convolutional neural networks, Deep learning, Embedding network, Image segmentation, Large dataset, Large datasets, Medical imaging, Natural images, Net networks, Semantic segmentation, Semantics, Signal encoding, Tumors},
  pubstate  = {published},
  tppubtype = {article}
}
Nouboukpo, A.; Allili, M. S.
Spatially-coherent segmentation using hierarchical gaussian mixture reduction based on cauchy-schwarz divergence Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 11662 LNCS, pp. 388–396, 2019, ISSN: 03029743, (ISBN: 9783030272012).
Abstract | Links | BibTeX | Tags: Cauchy-Schwarz divergence, Foreground segmentation, Gaussian distribution, Gaussian Mixture Model, Gaussian mixture reduction, Image analysis, Image segmentation, Mixture reductions, Reduction algorithms, Reduction techniques, State-of-art methods
@article{nouboukpo_spatially-coherent_2019,
  title     = {Spatially-coherent segmentation using hierarchical {Gaussian} mixture reduction based on {Cauchy-Schwarz} divergence},
  author    = {Nouboukpo, A. and Allili, M. S.},
  editor    = {Campilho, A. and Yu, A. and Karray, F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85071452890&doi=10.1007%2f978-3-030-27202-9_35&partnerID=40&md5=2689080f7b2410040a038f080ef93bfa},
  doi       = {10.1007/978-3-030-27202-9_35},
  issn      = {0302-9743},
  isbn      = {978-3-030-27201-2},
  year      = {2019},
  date      = {2019-01-01},
  journal   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume    = {11662 LNCS},
  pages     = {388--396},
  publisher = {Springer Verlag},
  abstract  = {Gaussian mixture models (GMM) are widely used for image segmentation. The bigger the number in the mixture, the higher will be the data likelihood. Unfortunately, too many GMM components leads to model overfitting and poor segmentation. Thus, there has been a growing interest in GMM reduction algorithms that rely on component fusion while preserving the structure of data. In this work, we present an algorithm based on a closed-form Cauchy-Schwarz divergence for GMM reduction. Contrarily to previous GMM reduction techniques which a single GMM, our approach can lead to multiple small GMMs describing more accurately the structure of the data. Experiments on image foreground segmentation demonstrate the effectiveness of our proposed model compared to state-of-art methods. © Springer Nature Switzerland AG 2019.},
  keywords  = {Cauchy-Schwarz divergence, Foreground segmentation, Gaussian distribution, Gaussian Mixture Model, Gaussian mixture reduction, Image analysis, Image segmentation, Mixture reductions, Reduction algorithms, Reduction techniques, State-of-art methods},
  pubstate  = {published},
  tppubtype = {article}
}
Filali, I.; Allili, M. S.; Benblidia, N.
Multi-graph based salient object detection Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 9730, pp. 318–324, 2016, ISSN: 03029743, (ISBN: 9783319415000).
Abstract | Links | BibTeX | Tags: Graphic methods, Image analysis, Image segmentation, Multi-layer graphs, Multi-scale image decomposition, Multiscale segmentation, Natural images, Object detection, Object recognition, Objective functions, Saliency map, Salient object detection, Salient objects
@article{filali_multi-graph_2016,
  title     = {Multi-graph based salient object detection},
  author    = {Filali, I. and Allili, M. S. and Benblidia, N.},
  editor    = {Karray, F. and Campilho, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978804496&doi=10.1007%2f978-3-319-41501-7_36&partnerID=40&md5=eb519756d2e72245e4131d5dc0b416b5},
  doi       = {10.1007/978-3-319-41501-7_36},
  issn      = {0302-9743},
  isbn      = {978-3-319-41500-0},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume    = {9730},
  pages     = {318--324},
  publisher = {Springer Verlag},
  abstract  = {We propose a multi-layer graph based approach for salient object detection in natural images. Starting from a set of multi-scale image decomposition using superpixels, we propose an objective function optimized on a multi-layer graph structure to diffuse saliency from image borders to salient objects. After isolating the object kernel, we enhance the accuracy of our saliency maps through an objectness-like based refinement approach. Beside its simplicity, our algorithm yields very accurate salient objects with clear boundaries. Experiments have shown that our approach outperforms several recent methods dealing with salient object detection. © Springer International Publishing Switzerland 2016.},
  keywords  = {Graphic methods, Image analysis, Image segmentation, Multi-layer graphs, Multi-scale image decomposition, Multiscale segmentation, Natural images, Object detection, Object recognition, Objective functions, Saliency map, Salient object detection, Salient objects},
  pubstate  = {published},
  tppubtype = {article}
}
Allili, M. S.; Ziou, D.
Likelihood-based feature relevance for figure-ground segmentation in images and videos Journal Article
In: Neurocomputing, vol. 167, pp. 658–670, 2015, ISSN: 09252312, (Publisher: Elsevier).
Abstract | Links | BibTeX | Tags: accuracy, algorithm, article, calculation, Feature relevance, Figure-ground segmentations, Gaussian mixture model (GMMs), Image analysis, Image Enhancement, image quality, Image segmentation, Level Set, linear system, mathematical analysis, mathematical model, Negative examples, priority journal, Video cameras, videorecording
@article{allili_likelihood-based_2015,
  title     = {Likelihood-based feature relevance for figure-ground segmentation in images and videos},
  author    = {Allili, M. S. and Ziou, D.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84952631642&doi=10.1016%2fj.neucom.2015.04.015&partnerID=40&md5=833948d0784e0dc42c2245b9343971dd},
  doi       = {10.1016/j.neucom.2015.04.015},
  issn      = {0925-2312},
  year      = {2015},
  date      = {2015-01-01},
  journal   = {Neurocomputing},
  volume    = {167},
  pages     = {658--670},
  publisher = {Elsevier},
  abstract  = {We propose an efficient method for image/video figure-ground segmentation using feature relevance (FR) and active contours. Given a set of positive and negative examples of a specific foreground (an object of interest (OOI) in an image or a tracked objet in a video), we first learn the foreground distribution model and its characteristic features that best discriminate it from its contextual background. For this goal, an objective function based on feature likelihood ratio is proposed for supervised FR computation. FR is then incorporated in foreground segmentation of new images and videos using level sets and energy minimization. We show the effectiveness of our approach on several examples of image/video figure-ground segmentation. © 2015 Elsevier B.V.},
  keywords  = {accuracy, algorithm, article, calculation, Feature relevance, Figure-ground segmentations, Gaussian mixture model (GMMs), Image analysis, Image Enhancement, image quality, Image segmentation, Level Set, linear system, mathematical analysis, mathematical model, Negative examples, priority journal, Video cameras, videorecording},
  pubstate  = {published},
  tppubtype = {article}
}
Boulmerka, A.; Allili, M. Saïd; Ait-Aoudia, S.
A generalized multiclass histogram thresholding approach based on mixture modelling Journal Article
In: Pattern Recognition, vol. 47, no. 3, pp. 1330–1348, 2014, ISSN: 00313203.
Abstract | Links | BibTeX | Tags: Arbitrary number, Conditional distribution, Gaussian distribution, Gaussian noise (electronic), Generalized Gaussian Distributions, Graphic methods, Histogram thresholding, Image segmentation, Minimum error thresholding, Mixture-modelling, Mixtures, State-of-the-art techniques, Statistical methods, Thresholding, Thresholding methods
@article{boulmerka_generalized_2014,
  title    = {A generalized multiclass histogram thresholding approach based on mixture modelling},
  author   = {Boulmerka, A. and Allili, M. Saïd and Ait-Aoudia, S.},
  url      = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84888328869&doi=10.1016%2fj.patcog.2013.09.004&partnerID=40&md5=d8b872bd0abe9e6c4d52439f8ec360bc},
  doi      = {10.1016/j.patcog.2013.09.004},
  issn     = {0031-3203},
  year     = {2014},
  date     = {2014-01-01},
  journal  = {Pattern Recognition},
  volume   = {47},
  number   = {3},
  pages    = {1330--1348},
  abstract = {This paper presents a new approach to multi-class thresholding-based segmentation. It considerably improves existing thresholding methods by efficiently modeling non-Gaussian and multi-modal class-conditional distributions using mixtures of generalized Gaussian distributions (MoGG). The proposed approach seamlessly: (1) extends the standard Otsu's method to arbitrary numbers of thresholds and (2) extends the Kittler and Illingworth minimum error thresholding to non-Gaussian and multi-modal class-conditional data. MoGGs enable efficient representation of heavy-tailed data and multi-modal histograms with flat or sharply shaped peaks. Experiments on synthetic data and real-world image segmentation show the performance of the proposed approach with comparison to recent state-of-the-art techniques. © 2013 Elsevier Ltd. All rights reserved.},
  keywords = {Arbitrary number, Conditional distribution, Gaussian distribution, Gaussian noise (electronic), Generalized Gaussian Distributions, Graphic methods, Histogram thresholding, Image segmentation, Minimum error thresholding, Mixture-modelling, Mixtures, State-of-the-art techniques, Statistical methods, Thresholding, Thresholding methods},
  pubstate = {published},
  tppubtype = {article}
}



