

de Recherche et d’Innovation
en Cybersécurité et Société
Amirkhani, D.; Allili, M. S.; Lapointe, J.-F.
CrackSight: An Efficient Crack Segmentation Model in Varying Acquisition Ranges and Complex Backgrounds Article de journal
Dans: IEEE Transactions on Automation Science and Engineering, vol. 22, p. 19197–19214, 2025, ISSN: 1545-5955.
Résumé | Liens | BibTeX | Étiquettes: Attention mechanisms, Codes (symbols), Complex background, complex backgrounds, Crack detection, Crack propagation, Crack segmentation, Crack segmentations, Detection features, End to end, Feature extraction, Features extraction, Global context, Image segmentation, Learning models, Learning systems, Segmentation models, Transportation infrastructures
@article{amirkhani_cracksight_2025,
  title     = {{CrackSight}: An Efficient Crack Segmentation Model in Varying Acquisition Ranges and Complex Backgrounds},
  author    = {Amirkhani, D. and Allili, M. S. and Lapointe, J.-F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011756992&doi=10.1109%2FTASE.2025.3591407&partnerID=40&md5=d908b79e863a4725d10bec325b761f34},
  doi       = {10.1109/TASE.2025.3591407},
  issn      = {1545-5955},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Transactions on Automation Science and Engineering},
  volume    = {22},
  pages     = {19197--19214},
  abstract  = {Accurate crack segmentation in concrete transportation infrastructures is critical for ensuring structural integrity and facilitating timely maintenance interventions. This paper presents CrackSight, an end-to-end deep learning model for precise crack segmentation across varying observational ranges and extremely complex backgrounds. CrackSight seamlessly integrates crack detection and segmentation through two branches. The Detection Feature Extraction Branch (DFEB) provides global context for crack localization in complex backgrounds or at far observation ranges. It guides the segmentation model to focus on regions with the highest crack-prone potential. The segmentation branch leverages the fusion of multi-scale feature maps using dilated convolutions, allowing to capture subtle and complex crack patterns. The branch also incorporates the Dual-Attention Linear Focus Mechanism (DALFM) enhancing crack segmentation through saliency-driven improvements. Finally, CrackSight uses a novel hybrid contextual loss, which dynamically compensates for class imbalance and enhance crack discrimination against complex backgrounds. Our model is also lightweight and can be run in resource-constrained environments, making it suitable for real-world inspection using mobile platforms. Our results demonstrate that it significantly improves segmentation accuracy, setting a new benchmark for crack segmentation. The dataset and additional resources are available on GitHub. Note to Practitioners—CrackSight is a dual-branch deep learning framework designed for accurate and efficient segmentation of concrete cracks under challenging real-world conditions. By combining a detection-guided localization branch with a context-aware segmentation, CrackSight offers enhanced robustness to noise, background clutter, and varying acquisition distances, common challenges in UAV-based infrastructure inspections. 
Its architecture integrates multi-scale feature fusion and adaptive contextual guidance, enabling reliable detection of both fine and fragmented cracks. With its lightweight design and fast inference time, CrackSight offers practitioners a practical and scalable solution for automating visual inspection tasks, reducing manual effort, and improving safety in structural health monitoring workflows. © 2025 IEEE.},
  keywords  = {Attention mechanisms, Codes (symbols), Complex background, complex backgrounds, Crack detection, Crack propagation, Crack segmentation, Crack segmentations, Detection features, End to end, Feature extraction, Features extraction, Global context, Image segmentation, Learning models, Learning systems, Segmentation models, Transportation infrastructures},
  pubstate  = {published},
  tppubtype = {article}
}
Bouafia, Y.; Allili, M. S.; Hebbache, L.; Guezouli, L.
SES-ReNet: Lightweight deep learning model for human detection in hazy weather conditions Article de journal
Dans: Signal Processing: Image Communication, vol. 130, 2025, ISSN: 0923-5965, (Publisher: Elsevier B.V.).
Résumé | Liens | BibTeX | Étiquettes: Condition, Deep learning, face recognition, Hazy weather, Human detection, Knowledge distillation, Learning models, Lightweight Retinanet, Outdoor scenes, Personal safety, Personal security, Safety and securities
@article{bouafia_ses-renet_2025,
  title     = {{SES-ReNet}: Lightweight deep learning model for human detection in hazy weather conditions},
  author    = {Bouafia, Y. and Allili, M. S. and Hebbache, L. and Guezouli, L.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85208562795&doi=10.1016%2fj.image.2024.117223&partnerID=40&md5=322b79f8d78045395efcabcbf86c6e1c},
  doi       = {10.1016/j.image.2024.117223},
  issn      = {0923-5965},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Signal Processing: Image Communication},
  volume    = {130},
  publisher = {Elsevier B.V.},
  abstract  = {Accurate detection of people in outdoor scenes plays an essential role in improving personal safety and security. However, existing human detection algorithms face significant challenges when visibility is reduced and human appearance is degraded, particularly in hazy weather conditions. To address this problem, we present a novel lightweight model based on the RetinaNet detection architecture. The model incorporates a lightweight backbone feature extractor, a dehazing functionality based on knowledge distillation (KD), and a multi-scale attention mechanism based on the Squeeze and Excitation (SE) principle. KD is achieved from a larger network trained on unhazed clear images, whereas attention is incorporated at low-level and high-level features of the network. Experimental results have shown remarkable performance, outperforming state-of-the-art methods while running at 22 FPS. The combination of high accuracy and real-time capabilities makes our approach a promising solution for effective human detection in challenging weather conditions and suitable for real-time applications. © 2024},
  note      = {Publisher: Elsevier B.V.},
  keywords  = {Condition, Deep learning, face recognition, Hazy weather, Human detection, Knowledge distillation, Learning models, Lightweight Retinanet, Outdoor scenes, Personal safety, Personal security, Safety and securities},
  pubstate  = {published},
  tppubtype = {article}
}



