

de Recherche et d’Innovation
en Cybersécurité et Société
Zetout, A.; Allili, M. S.
CSDNet: Context-Aware Segmentation of Disaster Aerial Imagery Using Detection-Guided Features and Lightweight Transformers Journal Article
In: Remote Sensing, vol. 17, no. 14, 2025, ISSN: 2072-4292.
Abstract | Links | BibTeX | Tags: Aerial imagery, Affected area, Antennas, Class imbalance, Context-Aware, Contextual semantic segmentation, Contextual semantics, Detection, disaster response, Disaster-response, Emergency services, Error detection, Feature extraction, Lightweight model, Semantic segmentation, Semantics
@article{zetout_csdnet_2025,
  title = {{CSDNet}: Context-Aware Segmentation of Disaster Aerial Imagery Using Detection-Guided Features and Lightweight Transformers},
  author = {Zetout, A. and Allili, M. S.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011677142&doi=10.3390%2Frs17142337&partnerID=40&md5=a83db334b208d065476e0026ad0ee416},
  doi = {10.3390/rs17142337},
  issn = {2072-4292},
  year = {2025},
  date = {2025-01-01},
  journal = {Remote Sensing},
  volume = {17},
  number = {14},
  abstract = {Accurate multi-class semantic segmentation of disaster-affected areas is essential for rapid response and effective recovery planning. We present CSDNet, a context-aware segmentation model tailored to disaster scene scenarios, designed to improve segmentation of both large-scale disaster zones and small, underrepresented classes. The architecture combines a lightweight transformer module for global context modeling with depthwise separable convolutions (DWSCs) to enhance efficiency without compromising representational capacity. Additionally, we introduce a detection-guided feature fusion mechanism that integrates outputs from auxiliary detection tasks to mitigate class imbalance and improve discrimination of visually similar categories. Extensive experiments on several public datasets demonstrate that our model significantly improves segmentation of both man-made infrastructure and natural damage-related features, offering a robust and efficient solution for post-disaster analysis. © 2025 by the authors.},
  keywords = {Aerial imagery, Affected area, Antennas, Class imbalance, Context-Aware, Contextual semantic segmentation, Contextual semantics, Detection, disaster response, Disaster-response, Emergency services, Error detection, Feature extraction, Lightweight model, Semantic segmentation, Semantics},
  pubstate = {published},
  tppubtype = {article}
}
Amirkhani, D.; Allili, M. S.; Lapointe, J. -F.
CrackSight: An Efficient Crack Segmentation Model in Varying Acquisition Ranges and Complex Backgrounds Journal Article
In: IEEE Transactions on Automation Science and Engineering, vol. 22, pp. 19197–19214, 2025, ISSN: 1545-5955.
Abstract | Links | BibTeX | Tags: Attention mechanisms, Codes (symbols), Complex background, complex backgrounds, Crack detection, Crack propagation, Crack segmentation, Crack segmentations, Detection features, End to end, Feature extraction, Features extraction, Global context, Image segmentation, Learning models, Learning systems, Segmentation models, Transportation infrastructures
@article{amirkhani_cracksight_2025,
  title = {{CrackSight}: An Efficient Crack Segmentation Model in Varying Acquisition Ranges and Complex Backgrounds},
  author = {Amirkhani, D. and Allili, M. S. and Lapointe, J.-F.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011756992&doi=10.1109%2FTASE.2025.3591407&partnerID=40&md5=d908b79e863a4725d10bec325b761f34},
  doi = {10.1109/TASE.2025.3591407},
  issn = {1545-5955},
  year = {2025},
  date = {2025-01-01},
  journal = {IEEE Transactions on Automation Science and Engineering},
  volume = {22},
  pages = {19197--19214},
  abstract = {Accurate crack segmentation in concrete transportation infrastructures is critical for ensuring structural integrity and facilitating timely maintenance interventions. This paper presents CrackSight, an end-to-end deep learning model for precise crack segmentation across varying observational ranges and extremely complex backgrounds. CrackSight seamlessly integrates crack detection and segmentation through two branches. The Detection Feature Extraction Branch (DFEB) provides global context for crack localization in complex backgrounds or at far observation ranges. It guides the segmentation model to focus on regions with the highest crack-prone potential. The segmentation branch leverages the fusion of multi-scale feature maps using dilated convolutions, allowing to capture subtle and complex crack patterns. The branch also incorporates the Dual-Attention Linear Focus Mechanism (DALFM) enhancing crack segmentation through saliency-driven improvements. Finally, CrackSight uses a novel hybrid contextual loss, which dynamically compensates for class imbalance and enhance crack discrimination against complex backgrounds. Our model is also lightweight and can be run in resource-constrained environments, making it suitable for real-world inspection using mobile platforms. Our results demonstrate that it significantly improves segmentation accuracy, setting a new benchmark for crack segmentation. The dataset and additional resources are available on GitHub. Note to Practitioners—CrackSight is a dual-branch deep learning framework designed for accurate and efficient segmentation of concrete cracks under challenging real-world conditions. By combining a detection-guided localization branch with a context-aware segmentation, CrackSight offers enhanced robustness to noise, background clutter, and varying acquisition distances, common challenges in UAV-based infrastructure inspections. Its architecture integrates multi-scale feature fusion and adaptive contextual guidance, enabling reliable detection of both fine and fragmented cracks. With its lightweight design and fast inference time, CrackSight offers practitioners a practical and scalable solution for automating visual inspection tasks, reducing manual effort, and improving safety in structural health monitoring workflows. © 2025 IEEE.},
  keywords = {Attention mechanisms, Codes (symbols), Complex background, complex backgrounds, Crack detection, Crack propagation, Crack segmentation, Crack segmentations, Detection features, End to end, Feature extraction, Features extraction, Global context, Image segmentation, Learning models, Learning systems, Segmentation models, Transportation infrastructures},
  pubstate = {published},
  tppubtype = {article}
}
Hebbache, L.; Amirkhani, D.; Allili, M. S.; Hammouche, N.; Lapointe, J. -F.
Leveraging Saliency in Single-Stage Multi-Label Concrete Defect Detection Using Unmanned Aerial Vehicle Imagery Journal Article
In: Remote Sensing, vol. 15, no. 5, 2023, ISSN: 2072-4292, (Publisher: MDPI).
Abstract | Links | BibTeX | Tags: Aerial vehicle, Aircraft detection, Antennas, Computational efficiency, Concrete defects, Deep learning, Defect detection, extraction, Feature extraction, Features extraction, Image acquisition, Image Enhancement, Multi-labels, One-stage concrete defect detection, Saliency, Single stage, Unmanned aerial vehicles (UAV), Unmanned areal vehicle imagery
@article{hebbache_leveraging_2023,
  title = {Leveraging Saliency in Single-Stage Multi-Label Concrete Defect Detection Using Unmanned Aerial Vehicle Imagery},
  author = {Hebbache, L. and Amirkhani, D. and Allili, M. S. and Hammouche, N. and Lapointe, J.-F.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85149966766&doi=10.3390%2frs15051218&partnerID=40&md5=7bf1cb3353270c696c07ff24dc24655d},
  doi = {10.3390/rs15051218},
  issn = {2072-4292},
  year = {2023},
  date = {2023-01-01},
  journal = {Remote Sensing},
  volume = {15},
  number = {5},
  publisher = {MDPI},
  abstract = {Visual inspection of concrete structures using Unmanned Areal Vehicle (UAV) imagery is a challenging task due to the variability of defects’ size and appearance. This paper proposes a high-performance model for automatic and fast detection of bridge concrete defects using UAV-acquired images. Our method, coined the Saliency-based Multi-label Defect Detector (SMDD-Net), combines pyramidal feature extraction and attention through a one-stage concrete defect detection model. The attention module extracts local and global saliency features, which are scaled and integrated with the pyramidal feature extraction module of the network using the max-pooling, multiplication, and residual skip connections operations. This has the effect of enhancing the localisation of small and low-contrast defects, as well as the overall accuracy of detection in varying image acquisition ranges. Finally, a multi-label loss function detection is used to identify and localise overlapping defects. The experimental results on a standard dataset and real-world images demonstrated the performance of SMDD-Net with regard to state-of-the-art techniques. The accuracy and computational efficiency of SMDD-Net make it a suitable method for UAV-based bridge structure inspection. © 2023 by the authors.},
  note = {Publisher: MDPI},
  keywords = {Aerial vehicle, Aircraft detection, Antennas, Computational efficiency, Concrete defects, Deep learning, Defect detection, extraction, Feature extraction, Features extraction, Image acquisition, Image Enhancement, Multi-labels, One-stage concrete defect detection, Saliency, Single stage, Unmanned aerial vehicles (UAV), Unmanned areal vehicle imagery},
  pubstate = {published},
  tppubtype = {article}
}
Saidani, N.; Adi, K.; Allili, M. S.
A supervised approach for spam detection using text-based semantic representation Journal Article
In: Lecture Notes in Business Information Processing, vol. 289, pp. 136–148, 2017, ISSN: 1865-1348, (ISBN: 9783319590400).
Abstract | Links | BibTeX | Tags: Domain categorization, E-mail spam, Electronic mail, Feature extraction, Semantic analysis, Semantic features, Semantic representation, Semantic structures, Semantics, Spam detection, Spam filtering
@article{saidani_supervised_2017,
  title = {A supervised approach for spam detection using text-based semantic representation},
  author = {Saidani, N. and Adi, K. and Allili, M. S.},
  editor = {Aimeur, E. and Weiss, M. and Ruhi, U.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019905686&doi=10.1007%2f978-3-319-59041-7_8&partnerID=40&md5=f416f274d5e08603fa6d1ec9a4cf9c43},
  doi = {10.1007/978-3-319-59041-7_8},
  issn = {1865-1348},
  year = {2017},
  date = {2017-01-01},
  journal = {Lecture Notes in Business Information Processing},
  volume = {289},
  pages = {136--148},
  publisher = {Springer Verlag},
  abstract = {In this paper, we propose an approach for email spam detection based on text semantic analysis at two levels. The first level allows categorization of emails by specific domains (e.g., health, education, finance, etc.). The second level uses semantic features for spam detection in each specific domain. We show that the proposed method provides an efficient representation of internal semantic structure of email content which allows for more precise and interpretable spam filtering results compared to existing methods. © Springer International Publishing AG 2017.},
  note = {ISBN: 9783319590400},
  keywords = {Domain categorization, E-mail spam, Electronic mail, Feature extraction, Semantic analysis, Semantic features, Semantic representation, Semantic structures, Semantics, Spam detection, Spam filtering},
  pubstate = {published},
  tppubtype = {article}
}
Yapi, D.; Mejri, M.; Allili, M. S.; Baaziz, N.
A learning-based approach for automatic defect detection in textile images Proceedings Article
In: Zaremba, M.; Sasiadek, J.; Dolgui, A. (Eds.): IFAC-PapersOnLine, pp. 2423–2428, 2015, ISSN: 2405-8963, (Journal Abbreviation: IFAC-PapersOnLine).
Abstract | Links | BibTeX | Tags: Algorithms, Artificial intelligence, Automatic defect detections, Barium compounds, Bayes Classifier, Computational efficiency, Contourlets, Defect detection, Defect detection algorithm, Defects, Detection problems, Feature extraction, Feature extraction and classification, Gaussians, Image classification, Learning algorithms, Learning systems, Learning-based approach, Machine learning approaches, Mixture of generalized gaussians, Mixtures of generalized Gaussians (MoGG), Textile defect detection, Textile images, Textiles, Textures
@inproceedings{yapi_learning-based_2015,
  title = {A learning-based approach for automatic defect detection in textile images},
  author = {Yapi, D. and Mejri, M. and Allili, M. S. and Baaziz, N.},
  editor = {Zaremba, M. and Sasiadek, J. and Dolgui, A.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84953865559&doi=10.1016%2fj.ifacol.2015.06.451&partnerID=40&md5=3dd0ef4c27cbd55700f6511af5f46772},
  doi = {10.1016/j.ifacol.2015.06.451},
  issn = {2405-8963},
  year = {2015},
  date = {2015-01-01},
  booktitle = {IFAC-PapersOnLine},
  volume = {28},
  number = {3},
  pages = {2423--2428},
  abstract = {This paper addresses the textile defect detection problem using a machine-learning approach. We propose a novel algorithm that uses supervised learning to classify textile textures in defect and non-defect classes based on suitable feature extraction and classification. We use statistical modeling of multi-scale contourlet image decomposition to obtain compact and accurate signatures for texture description. Our defect detection algorithm is based on two phases. In the first phase, using a training set of images, we extract reference defect-free signatures for each textile category. Then, we use the Bayes classifier (BC) to learn signatures of defected and non-defected classes. In the second phase, defects are detected on new images using the trained BC and an appropriate decomposition of images into blocks. Our algorithm has the capability to achieve highly accurate defect detection and localisation in textile textures while ensuring an efficient computational time. Compared to recent state-of-the-art methods, our algorithm has yielded better results on the standard TILDA database. © 2015, IFAC (International Federation of Automatic Control) Hosting by Elsevier Ltd. All rights reserved.},
  note = {Journal Abbreviation: IFAC-PapersOnLine},
  keywords = {Algorithms, Artificial intelligence, Automatic defect detections, Barium compounds, Bayes Classifier, Computational efficiency, Contourlets, Defect detection, Defect detection algorithm, Defects, Detection problems, Feature extraction, Feature extraction and classification, Gaussians, Image classification, Learning algorithms, Learning systems, Learning-based approach, Machine learning approaches, Mixture of generalized gaussians, Mixtures of generalized Gaussians (MoGG), Textile defect detection, Textile images, Textiles, Textures},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Pedrocca, P. J.; Allili, M. S.
Real-time people detection in videos using geometrical features and adaptive boosting Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 6753 LNCS, no. PART 1, pp. 314–324, 2011, ISSN: 0302-9743, (ISBN: 9783642215926 Place: Burnaby, BC).
Abstract | Links | BibTeX | Tags: Adaboost learning, Adaptive boosting, Change detection algorithms, Feature analysis, Feature extraction, Geometrical features, Geometry, Image analysis, Object recognition, Pedestrian detection, People detection, Real world videos, Signal detection, Video sequences
@article{pedrocca_real-time_2011,
  title = {Real-time people detection in videos using geometrical features and adaptive boosting},
  author = {Pedrocca, P. J. and Allili, M. S.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79960336661&doi=10.1007%2f978-3-642-21593-3_32&partnerID=40&md5=47ca975800e68648e02f76eba89a7457},
  doi = {10.1007/978-3-642-21593-3_32},
  issn = {0302-9743},
  year = {2011},
  date = {2011-01-01},
  journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume = {6753 LNCS},
  number = {PART 1},
  pages = {314--324},
  abstract = {In this paper, we propose a new approach for detecting people in video sequences based on geometrical features and AdaBoost learning. Unlike its predecessors, our approach uses features calculated directly from silhouettes produced by change detection algorithms. Moreover, feature analysis is done part by part for each silhouette, making our approach efficiently applicable for partially-occluded pedestrians and groups of people detection. Experiments on real-world videos showed us the performance of the proposed approach for real-time pedestrian detection. © 2011 Springer-Verlag.},
  note = {ISBN: 9783642215926, Place: Burnaby, BC},
  keywords = {Adaboost learning, Adaptive boosting, Change detection algorithms, Feature analysis, Feature extraction, Geometrical features, Geometry, Image analysis, Object recognition, Pedestrian detection, People detection, Real world videos, Signal detection, Video sequences},
  pubstate = {published},
  tppubtype = {article}
}
Allili, M. S.; Ziou, D.; Bouguila, N.; Boutemedjet, S.
Image and video segmentation by combining unsupervised generalized Gaussian mixture modeling and feature selection Journal Article
In: IEEE Transactions on Circuits and Systems for Video Technology, vol. 20, no. 10, pp. 1373–1377, 2010, ISSN: 1051-8215.
Abstract | Links | BibTeX | Tags: Clustering model, Feature extraction, Feature selection, Gaussian distribution, Generalized Gaussian, Heavy-tailed, High dimensional spaces, Image and video segmentation, Image segmentation, image/video segmentation, Minimum message lengths, Real-world image, Video cameras
@article{allili_image_2010,
  title = {Image and video segmentation by combining unsupervised generalized {Gaussian} mixture modeling and feature selection},
  author = {Allili, M. S. and Ziou, D. and Bouguila, N. and Boutemedjet, S.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77957964550&doi=10.1109%2fTCSVT.2010.2077483&partnerID=40&md5=d888c7fe52eff37a5744bccd6a4d3d9e},
  doi = {10.1109/TCSVT.2010.2077483},
  issn = {1051-8215},
  year = {2010},
  date = {2010-01-01},
  journal = {IEEE Transactions on Circuits and Systems for Video Technology},
  volume = {20},
  number = {10},
  pages = {1373--1377},
  abstract = {In this letter, we propose a clustering model that efficiently mitigates image and video under/over-segmentation by combining generalized Gaussian mixture modeling and feature selection. The model has flexibility to accurately represent heavy-tailed image/video histograms, while automatically discarding uninformative features, leading to better discrimination and localization of regions in high-dimensional spaces. Experimental results on a database of real-world images and videos showed us the effectiveness of the proposed approach. © 2010 IEEE.},
  keywords = {Clustering model, Feature extraction, Feature selection, Gaussian distribution, Generalized Gaussian, Heavy-tailed, High dimensional spaces, Image and video segmentation, Image segmentation, image/video segmentation, Minimum message lengths, Real-world image, Video cameras},
  pubstate = {published},
  tppubtype = {article}
}
Allili, M. S.; Ziou, D.; Bouguila, N.; Boutemedjet, S.
Unsupervised feature selection and learning for image segmentation Proceedings Article
In: CRV 2010 - 7th Canadian Conference on Computer and Robot Vision, pp. 285–292, Ottawa, ON, 2010, ISBN: 978-0-7695-4040-5.
Abstract | Links | BibTeX | Tags: Clustering algorithms, Computer vision, Evolutionary algorithms, Feature extraction, Feature selection, Gaussian distribution, Generalized Gaussian, Generalized Gaussian Distributions, Heavy-tailed, High dimensional spaces, Image distributions, Image segmentation, Large database, Over-estimation, Real-world image, Unsupervised feature selection
@inproceedings{allili_unsupervised_2010,
  title = {Unsupervised feature selection and learning for image segmentation},
  author = {Allili, M. S. and Ziou, D. and Bouguila, N. and Boutemedjet, S.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77954407977&doi=10.1109%2fCRV.2010.44&partnerID=40&md5=a7d8e3147216429f18ef7af3167acb42},
  doi = {10.1109/CRV.2010.44},
  isbn = {978-0-7695-4040-5},
  year = {2010},
  date = {2010-01-01},
  booktitle = {CRV 2010 - 7th Canadian Conference on Computer and Robot Vision},
  pages = {285--292},
  address = {Ottawa, ON},
  abstract = {In this paper we investigate the integration of feature selection in segmentation through an unsupervised learning approach. We propose a clustering algorithm that efficiently mitigates image under/over-segmentation, by combining generalized Gaussian mixture modeling and feature selection. The algorithm is based on generalized Gaussian mixture modeling which is less prone to region number over-estimation in case of noisy and heavy-tailed image distributions. On the other hand, our feature selection mechanism allows to automatically discard uninformative features, which leads to better discrimination and localization of regions in high-dimensional spaces. Experimental results on a large database of real-world images showed us the effectiveness of the proposed approach. © 2010 IEEE.},
  keywords = {Clustering algorithms, Computer vision, Evolutionary algorithms, Feature extraction, Feature selection, Gaussian distribution, Generalized Gaussian, Generalized Gaussian Distributions, Heavy-tailed, High dimensional spaces, Image distributions, Image segmentation, Large database, Over-estimation, Real-world image, Unsupervised feature selection},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Allili, M. S.; Ziou, D.
Using feature selection for object segmentation and tracking Proceedings Article
In: Proceedings - Fourth Canadian Conference on Computer and Robot Vision, CRV 2007, pp. 191–198, Montreal, QC, 2007, ISBN: 0-7695-2786-8 978-0-7695-2786-4.
Abstract | Links | BibTeX | Tags: Active contours, Algorithms, Feature extraction, Feature relevance, Image segmentation, Maximum likelihood, Mixture models, Negative examples, Object of interest (OOI), Optimization, Target tracking
@inproceedings{allili_using_2007,
  title = {Using feature selection for object segmentation and tracking},
  author = {Allili, M. S. and Ziou, D.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-34548781938&doi=10.1109%2fCRV.2007.67&partnerID=40&md5=3fb26f3fcc7a6f55f705255758fef582},
  doi = {10.1109/CRV.2007.67},
  isbn = {978-0-7695-2786-4},
  year = {2007},
  date = {2007-01-01},
  booktitle = {Proceedings - Fourth Canadian Conference on Computer and Robot Vision, CRV 2007},
  pages = {191--198},
  address = {Montreal, QC},
  abstract = {Most image segmentation algorithms in the past are based on optimizing an objective function that aims to achieve the similarity between several low-level features to build a partition of the image into homogeneous regions. In the present paper, we propose to incorporate the relevance (selection) of the grouping features to enforce the segmentation toward the capturing of objects of interest. The relevance of the features is determined through a set of positive and negative examples of a specific object defined a priori by the user. The calculation of the relevance of the features is performed by maximizing an objective function defined on the mixture likelihoods of the positive and negative object examples sets. The incorporation of the features relevance in the object segmentation is formulated through an energy functional which is minimized by using level set active contours. We show the efficiency of the approach on several examples of object of interest segmentation and tracking where the features relevance was used. © 2007 IEEE.},
  note = {ISBN-10: 0-7695-2786-8},
  keywords = {Active contours, Algorithms, Feature extraction, Feature relevance, Image segmentation, Maximum likelihood, Mixture models, Negative examples, Object of interest (OOI), Optimization, Target tracking},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Allili, M. S.; Ziou, D.
Object of interest segmentation and tracking by using feature selection and active contours Proceedings Article
In: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, Minneapolis, MN, 2007, ISSN: 1063-6919.
Abstract | Links | BibTeX | Tags: Feature extraction, Image acquisition, Image segmentation, Object recognition, Object segmentation, Objective functions, Optimization
@inproceedings{allili_object_2007,
  title = {Object of interest segmentation and tracking by using feature selection and active contours},
  author = {Allili, M. S. and Ziou, D.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-34948855864&doi=10.1109%2fCVPR.2007.383449&partnerID=40&md5=2429a266190c72bb8fb8d3776c444906},
  doi = {10.1109/CVPR.2007.383449},
  issn = {1063-6919},
  year = {2007},
  date = {2007-01-01},
  booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
  address = {Minneapolis, MN},
  abstract = {Most image segmentation algorithms in the past are based on optimizing an objective function that aims to achieve the similarity between several low-level features to build a partition of the image into homogeneous regions. In the present paper, we propose to incorporate the relevance (selection) of the grouping features to enforce the segmentation toward the capturing of objects of interest. The relevance of the features is determined through a set of positive and negative examples of a specific object defined a priori by the user. The calculation of the relevance of the features is performed by maximizing an objective function defined on the mixture likelihoods of the positive and negative object examples sets. The incorporation of the features relevance in the object segmentation is formulated through an energy functional which is minimized by using level set active contours. We show the efficiency of the approach on several examples of object of interest segmentation and tracking where the features relevance is used. © 2007 IEEE.},
  keywords = {Feature extraction, Image acquisition, Image segmentation, Object recognition, Object segmentation, Objective functions, Optimization},
  pubstate = {published},
  tppubtype = {inproceedings}
}



