

de Recherche et d’Innovation
en Cybersécurité et Société
Abdollahzadeh, S.; Allili, M. S.; Boulmerka, A.; Lapointe, J.-F.
A Vision-Based Framework for Safe Landing Zone Mapping of UAVs in Dynamic Environments Article de journal
Dans: IEEE Open Journal of the Computer Society, vol. 7, p. 492–503, 2026, ISSN: 26441268 (ISSN).
Résumé | Liens | BibTeX | Étiquettes: Aerial vehicle, Air navigation, Aircraft detection, Aircraft landing, Antennas, automatic UAV navigation, Computer vision, Dynamic environments, Forecasting, Homographies, Landing zones, Learning systems, Motion tracking, Object detection, Object recognition, Object Tracking, object trajectory prediction, Robotics, Safe landing, Safe landing zone, safe landing zones (SLZ), Semantic segmentation, Semantics, Trajectories, Trajectory forecasting, Uncrewed aerial vehicles (UAVs), Unmanned aerial vehicle, Unmanned aerial vehicles (UAV)
@article{abdollahzadeh_vision-based_2026,
  title     = {A Vision-Based Framework for Safe Landing Zone Mapping of {UAVs} in Dynamic Environments},
  author    = {Abdollahzadeh, S. and Allili, M. S. and Boulmerka, A. and Lapointe, J.-F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105029942397&doi=10.1109%2FOJCS.2026.3663268&partnerID=40&md5=b11484e035458c84b1d3f6780b92c91c},
  doi       = {10.1109/OJCS.2026.3663268},
  issn      = {2644-1268},
  year      = {2026},
  date      = {2026-01-01},
  journal   = {IEEE Open Journal of the Computer Society},
  volume    = {7},
  pages     = {492--503},
  abstract  = {Identification safe landing zones (SLZ) for Uncrewed Aerial Vehicles (UAVs) is important to ensure reliable and safe navigation, especially when they are operated in complex and safety-critical environments. However, this is a challenging task due to obstacles and UAV motion. This paper proposes a vision-based framework that maps SLZs in dynamic scenes by integrating several functionalities for analyzing visually static and dynamic aspects of a scene. Static analysis is achieved through context-aware segmentation which divides the image into thematic classes enabling to identify suitable landing surfaces (e.g., roads, grass). For dynamic content analysis, we combine object detection, tracking, and trajectory prediction to determine object occupancy and identify regions free of obstacles. Trajectory prediction is performed through a novel encoder–decoder architecture taking past object positions to predict the most likely future locations. To ensure stable and robust trajectory prediction, we introduce an optimized homography computation using multi-scale image analysis and cumulative updates to compensate UAV motion. We tested our framework on different operational scenarios, including urban and natural scenes with moving objects like vehicles and pedestrians. Obtained results demonstrate its strong performance, and its significant potential for enabling autonomous and safe UAV navigation. © 2020 IEEE.},
  keywords  = {Aerial vehicle, Air navigation, Aircraft detection, Aircraft landing, Antennas, automatic UAV navigation, Computer vision, Dynamic environments, Forecasting, Homographies, Landing zones, Learning systems, Motion tracking, Object detection, Object recognition, Object Tracking, object trajectory prediction, Robotics, Safe landing, Safe landing zone, safe landing zones (SLZ), Semantic segmentation, Semantics, Trajectories, Trajectory forecasting, Uncrewed aerial vehicles (UAVs), Unmanned aerial vehicle, Unmanned aerial vehicles (UAV)},
  pubstate  = {published},
  tppubtype = {article}
}
Zetout, A.; Allili, M. S.
CSDNet: Context-Aware Segmentation of Disaster Aerial Imagery Using Detection-Guided Features and Lightweight Transformers Article de journal
Dans: Remote Sensing, vol. 17, no 14, 2025, ISSN: 20724292 (ISSN).
Résumé | Liens | BibTeX | Étiquettes: Aerial imagery, Affected area, Antennas, Class imbalance, Context-Aware, Contextual semantic segmentation, Contextual semantics, Detection, disaster response, Disaster-response, Emergency services, Error detection, Feature extraction, Lightweight model, Semantic segmentation, Semantics
@article{zetout_csdnet_2025,
  title     = {{CSDNet}: Context-Aware Segmentation of Disaster Aerial Imagery Using Detection-Guided Features and Lightweight Transformers},
  author    = {Zetout, A. and Allili, M. S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011677142&doi=10.3390%2Frs17142337&partnerID=40&md5=a83db334b208d065476e0026ad0ee416},
  doi       = {10.3390/rs17142337},
  issn      = {2072-4292},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Remote Sensing},
  volume    = {17},
  number    = {14},
  abstract  = {Accurate multi-class semantic segmentation of disaster-affected areas is essential for rapid response and effective recovery planning. We present CSDNet, a context-aware segmentation model tailored to disaster scene scenarios, designed to improve segmentation of both large-scale disaster zones and small, underrepresented classes. The architecture combines a lightweight transformer module for global context modeling with depthwise separable convolutions (DWSCs) to enhance efficiency without compromising representational capacity. Additionally, we introduce a detection-guided feature fusion mechanism that integrates outputs from auxiliary detection tasks to mitigate class imbalance and improve discrimination of visually similar categories. Extensive experiments on several public datasets demonstrate that our model significantly improves segmentation of both man-made infrastructure and natural damage-related features, offering a robust and efficient solution for post-disaster analysis. © 2025 by the authors.},
  keywords  = {Aerial imagery, Affected area, Antennas, Class imbalance, Context-Aware, Contextual semantic segmentation, Contextual semantics, Detection, disaster response, Disaster-response, Emergency services, Error detection, Feature extraction, Lightweight model, Semantic segmentation, Semantics},
  pubstate  = {published},
  tppubtype = {article}
}
Abdollahzadeh, S.; Allili, M. S.; Boulmerka, A.; Lapointe, J.-F.
Visual Safety Mapping for UAV Landings Using Ordinal Regression Networks Article de journal
Dans: IEEE Transactions on Artificial Intelligence, 2025, ISSN: 26914581 (ISSN).
Résumé | Liens | BibTeX | Étiquettes: automatic UAV navigation, deep ordinal regression, safe landing zones (SLZ), Semantic segmentation
@article{abdollahzadeh_visual_2025,
  title     = {Visual Safety Mapping for {UAV} Landings Using Ordinal Regression Networks},
  author    = {Abdollahzadeh, S. and Allili, M. S. and Boulmerka, A. and Lapointe, J.-F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105023324811&doi=10.1109%2FTAI.2025.3635093&partnerID=40&md5=14d5d4e4558cf5f4db08bd7d2a61a945},
  doi       = {10.1109/TAI.2025.3635093},
  issn      = {2691-4581},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Transactions on Artificial Intelligence},
  abstract  = {As Unmanned Aerial Vehicles (UAVs) see growing use in civilian applications, reliably identifying Safe Landing Zones (SLZs) in varied environments is essential for autonomous navigation and emergency response. Passive vision sensors offer a low-cost, lightweight solution for real-time terrain analysis and 3D scene reconstruction, making them ideal for onboard systems. We introduce OR-SLZNet, an original deep learning model based on ordinal regression to predict SLZs from UAV imagery. Unlike prior approaches, OR-SLZNet produces dense, multi-level safety maps by jointly leveraging photometric (e.g., color and texture) and geometric cues (e.g., flatness, slope, and depth), assigning each pixel an ordinal safety score that reflects landing suitability. With real-time inference ({\textasciitilde}0.02s/frame), the model supports onboard deployment and rapid decision-making in time-critical situations. Extensive experiments on five diverse datasets demonstrate OR-SLZNet effectiveness and strong generalization across a wide range of structural complexities. © 2020 IEEE.},
  keywords  = {automatic UAV navigation, deep ordinal regression, safe landing zones (SLZ), Semantic segmentation},
  pubstate  = {published},
  tppubtype = {article}
}
Abdollahzadeh, S.; Proulx, P.-L.; Allili, M. S.; Lapointe, J.-F.
Safe Landing Zones Detection for UAVs Using Deep Regression Article d'actes
Dans: Proceedings - 2022 19th Conference on Robots and Vision, CRV 2022, p. 213–218, Institute of Electrical and Electronics Engineers Inc., 2022, ISBN: 978-1-66549-774-9.
Résumé | Liens | BibTeX | Étiquettes: Aerial vehicle, Air navigation, Aircraft detection, Antennas, Automatic unmanned aerial vehicle navigation, Deep learning, Deep regression, Landing, Landing zones, Safe landing, Safe landing zone, Semantic segmentation, Semantics, Unmanned aerial vehicles (UAV), Urban areas, Vehicle navigation, Zone detection
@inproceedings{abdollahzadeh_safe_2022,
  title     = {Safe Landing Zones Detection for {UAVs} Using Deep Regression},
  author    = {Abdollahzadeh, S. and Proulx, P.-L. and Allili, M. S. and Lapointe, J.-F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85138466098&doi=10.1109%2fCRV55824.2022.00035&partnerID=40&md5=9183f6cd002c8a9068716faf66da72ec},
  doi       = {10.1109/CRV55824.2022.00035},
  isbn      = {978-1-66549-774-9},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {Proceedings - 2022 19th Conference on Robots and Vision, CRV 2022},
  pages     = {213--218},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Finding safe landing zones (SLZ) in urban areas and natural scenes is one of the many challenges that must be overcome in automating Unmanned Aerial Vehicles (UAV) navigation. Using passive vision sensors to achieve this objective is a very promising avenue due to their low cost and the potential they provide for performing simultaneous terrain analysis and 3D reconstruction. In this paper, we propose using a deep learning approach on UAV imagery to assess the SLZ. The model is built on a semantic segmentation architecture whereby thematic classes of the terrain are mapped into safety scores for UAV landing. Contrary to past methods, which use hard classification into safe/unsafe landing zones, our approach provides a continuous safety map that is more practical for an emergency landing. Experiments on public datasets have shown promising results. © 2022 IEEE.},
  keywords  = {Aerial vehicle, Air navigation, Aircraft detection, Antennas, Automatic unmanned aerial vehicle navigation, Deep learning, Deep regression, Landing, Landing zones, Safe landing, Safe landing zone, Semantic segmentation, Semantics, Unmanned aerial vehicles (UAV), Urban areas, Vehicle navigation, Zone detection},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Messaoudi, H.; Belaid, A.; Allaoui, M. L.; Zetout, A.; Allili, M. S.; Tliba, S.; Ben Salem, D.; Conze, P.-H.
Efficient Embedding Network for 3D Brain Tumor Segmentation Article de journal
Dans: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 12658 LNCS, p. 252–262, 2021, ISSN: 03029743, (ISBN: 9783030720834).
Résumé | Liens | BibTeX | Étiquettes: 3D medical image processing, Brain, Brain tumor segmentation, Classification networks, Convolutional neural networks, Deep learning, Embedding network, Image segmentation, Large dataset, Large datasets, Medical imaging, Natural images, Net networks, Semantic segmentation, Semantics, Signal encoding, Tumors
@article{messaoudi_efficient_2021,
  title     = {Efficient Embedding Network for {3D} Brain Tumor Segmentation},
  author    = {Messaoudi, H. and Belaid, A. and Allaoui, M. L. and Zetout, A. and Allili, M. S. and Tliba, S. and Ben Salem, D. and Conze, P.-H.},
  editor    = {Crimi, A. and Bakas, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85107387134&doi=10.1007%2f978-3-030-72084-1_23&partnerID=40&md5=b3aa3516b0465a1bf5611db4727d95f1},
  doi       = {10.1007/978-3-030-72084-1_23},
  issn      = {0302-9743},
  isbn      = {9783030720834},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume    = {12658 LNCS},
  pages     = {252--262},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {3D medical image processing with deep learning greatly suffers from a lack of data. Thus, studies carried out in this field are limited compared to works related to 2D natural image analysis, where very large datasets exist. As a result, powerful and efficient 2D convolutional neural networks have been developed and trained. In this paper, we investigate a way to transfer the performance of a two-dimensional classification network for the purpose of three-dimensional semantic segmentation of brain tumors. We propose an asymmetric U-Net network by incorporating the EfficientNet model as part of the encoding branch. As the input data is in 3D, the first layers of the encoder are devoted to the reduction of the third dimension in order to fit the input of the EfficientNet network. Experimental results on validation and test data from the BraTS 2020 challenge demonstrate that the proposed method achieve promising performance. © 2021, Springer Nature Switzerland AG.},
  keywords  = {3D medical image processing, Brain, Brain tumor segmentation, Classification networks, Convolutional neural networks, Deep learning, Embedding network, Image segmentation, Large dataset, Large datasets, Medical imaging, Natural images, Net networks, Semantic segmentation, Semantics, Signal encoding, Tumors},
  pubstate  = {published},
  tppubtype = {article}
}



