

de Recherche et d’Innovation
en Cybersécurité et Société
Abdollahzadeh, S.; Allili, M. S.; Boulmerka, A.; Lapointe, J. -F.
A Vision-Based Framework for Safe Landing Zone Mapping of UAVs in Dynamic Environments Journal Article
In: IEEE Open Journal of the Computer Society, vol. 7, pp. 492–503, 2026, ISSN: 26441268 (ISSN).
Abstract | Links | BibTeX | Tags: Aerial vehicle, Air navigation, Aircraft detection, Aircraft landing, Antennas, automatic UAV navigation, Computer vision, Dynamic environments, Forecasting, Homographies, Landing zones, Learning systems, Motion tracking, Object detection, Object recognition, Object Tracking, object trajectory prediction, Robotics, Safe landing, Safe landing zone, safe landing zones (SLZ), Semantic segmentation, Semantics, Trajectories, Trajectory forecasting, Uncrewed aerial vehicles (UAVs), Unmanned aerial vehicle, Unmanned aerial vehicles (UAV)
@article{abdollahzadeh_vision-based_2026,
  title     = {A Vision-Based Framework for Safe Landing Zone Mapping of {UAVs} in Dynamic Environments},
  author    = {Abdollahzadeh, S. and Allili, M. S. and Boulmerka, A. and Lapointe, J.-F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105029942397&doi=10.1109%2FOJCS.2026.3663268&partnerID=40&md5=b11484e035458c84b1d3f6780b92c91c},
  doi       = {10.1109/OJCS.2026.3663268},
  issn      = {2644-1268},
  year      = {2026},
  date      = {2026-01-01},
  journal   = {IEEE Open Journal of the Computer Society},
  volume    = {7},
  pages     = {492--503},
  abstract  = {Identification safe landing zones (SLZ) for Uncrewed Aerial Vehicles (UAVs) is important to ensure reliable and safe navigation, especially when they are operated in complex and safety-critical environments. However, this is a challenging task due to obstacles and UAV motion. This paper proposes a vision-based framework that maps SLZs in dynamic scenes by integrating several functionalities for analyzing visually static and dynamic aspects of a scene. Static analysis is achieved through context-aware segmentation which divides the image into thematic classes enabling to identify suitable landing surfaces (e.g., roads, grass). For dynamic content analysis, we combine object detection, tracking, and trajectory prediction to determine object occupancy and identify regions free of obstacles. Trajectory prediction is performed through a novel encoder–decoder architecture taking past object positions to predict the most likely future locations. To ensure stable and robust trajectory prediction, we introduce an optimized homography computation using multi-scale image analysis and cumulative updates to compensate UAV motion. We tested our framework on different operational scenarios, including urban and natural scenes with moving objects like vehicles and pedestrians. Obtained results demonstrate its strong performance, and its significant potential for enabling autonomous and safe UAV navigation. © 2020 IEEE.},
  keywords  = {Aerial vehicle, Air navigation, Aircraft detection, Aircraft landing, Antennas, automatic UAV navigation, Computer vision, Dynamic environments, Forecasting, Homographies, Landing zones, Learning systems, Motion tracking, Object detection, Object recognition, Object Tracking, object trajectory prediction, Robotics, Safe landing, Safe landing zone, safe landing zones (SLZ), Semantic segmentation, Semantics, Trajectories, Trajectory forecasting, Uncrewed aerial vehicles (UAVs), Unmanned aerial vehicle, Unmanned aerial vehicles (UAV)},
  pubstate  = {published},
  tppubtype = {article}
}
Cheddadi, A. El; Moudoud, H.; Gagnon, S.
The role of artificial intelligence and machine learning in safeguarding IoMT security and privacy Journal Article
In: Digital Forensics in Next-Generation Internet of Medical Things: Balancing Security and Sustainability, pp. 1–17, 2025, ISBN: 978-100364032-5; 978-104107046-7.
Links | BibTeX | Tags: Artificial intelligence learning, Data privacy, Learning systems, Machine learning, Machine-learning, Security and privacy
@incollection{el_cheddadi_role_2025,
  title     = {The role of artificial intelligence and machine learning in safeguarding {IoMT} security and privacy},
  author    = {El Cheddadi, A. and Moudoud, H. and Gagnon, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105024398178&doi=10.1201%2F9781003640325-1&partnerID=40&md5=442f29d04b42d63f9fd11c83ad4a77df},
  doi       = {10.1201/9781003640325-1},
  isbn      = {978-100364032-5; 978-104107046-7},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Digital Forensics in Next-Generation Internet of Medical Things: Balancing Security and Sustainability},
  pages     = {1--17},
  keywords  = {Artificial intelligence learning, Data privacy, Learning systems, Machine learning, Machine-learning, Security and privacy},
  pubstate  = {published},
  tppubtype = {incollection}
}
Amirkhani, D.; Allili, M. S.; Lapointe, J. -F.
CrackSight: An Efficient Crack Segmentation Model in Varying Acquisition Ranges and Complex Backgrounds Journal Article
In: IEEE Transactions on Automation Science and Engineering, vol. 22, pp. 19197–19214, 2025, ISSN: 15455955 (ISSN).
Abstract | Links | BibTeX | Tags: Attention mechanisms, Codes (symbols), Complex background, complex backgrounds, Crack detection, Crack propagation, Crack segmentation, Crack segmentations, Detection features, End to end, Feature extraction, Features extraction, Global context, Image segmentation, Learning models, Learning systems, Segmentation models, Transportation infrastructures
@article{amirkhani_cracksight_2025,
  title     = {{CrackSight}: An Efficient Crack Segmentation Model in Varying Acquisition Ranges and Complex Backgrounds},
  author    = {Amirkhani, D. and Allili, M. S. and Lapointe, J.-F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011756992&doi=10.1109%2FTASE.2025.3591407&partnerID=40&md5=d908b79e863a4725d10bec325b761f34},
  doi       = {10.1109/TASE.2025.3591407},
  issn      = {1545-5955},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Transactions on Automation Science and Engineering},
  volume    = {22},
  pages     = {19197--19214},
  abstract  = {Accurate crack segmentation in concrete transportation infrastructures is critical for ensuring structural integrity and facilitating timely maintenance interventions. This paper presents CrackSight, an end-to-end deep learning model for precise crack segmentation across varying observational ranges and extremely complex backgrounds. CrackSight seamlessly integrates crack detection and segmentation through two branches. The Detection Feature Extraction Branch (DFEB) provides global context for crack localization in complex backgrounds or at far observation ranges. It guides the segmentation model to focus on regions with the highest crack-prone potential. The segmentation branch leverages the fusion of multi-scale feature maps using dilated convolutions, allowing to capture subtle and complex crack patterns. The branch also incorporates the Dual-Attention Linear Focus Mechanism (DALFM) enhancing crack segmentation through saliency-driven improvements. Finally, CrackSight uses a novel hybrid contextual loss, which dynamically compensates for class imbalance and enhance crack discrimination against complex backgrounds. Our model is also lightweight and can be run in resource-constrained environments, making it suitable for real-world inspection using mobile platforms. Our results demonstrate that it significantly improves segmentation accuracy, setting a new benchmark for crack segmentation. The dataset and additional resources are available on GitHub. Note to Practitioners—CrackSight is a dual-branch deep learning framework designed for accurate and efficient segmentation of concrete cracks under challenging real-world conditions. By combining a detection-guided localization branch with a context-aware segmentation, CrackSight offers enhanced robustness to noise, background clutter, and varying acquisition distances, common challenges in UAV-based infrastructure inspections. 
Its architecture integrates multi-scale feature fusion and adaptive contextual guidance, enabling reliable detection of both fine and fragmented cracks. With its lightweight design and fast inference time, CrackSight offers practitioners a practical and scalable solution for automating visual inspection tasks, reducing manual effort, and improving safety in structural health monitoring workflows. © 2025 IEEE.},
  keywords  = {Attention mechanisms, Codes (symbols), Complex background, complex backgrounds, Crack detection, Crack propagation, Crack segmentation, Crack segmentations, Detection features, End to end, Feature extraction, Features extraction, Global context, Image segmentation, Learning models, Learning systems, Segmentation models, Transportation infrastructures},
  pubstate  = {published},
  tppubtype = {article}
}
Joudeh, I. O.; Cretu, A. -M.; Bouchard, S.
Predicting the Arousal and Valence Values of Emotional States Using Learned, Predesigned, and Deep Visual Features † Journal Article
In: Sensors, vol. 24, no. 13, 2024, ISSN: 14248220 (ISSN), (Publisher: Multidisciplinary Digital Publishing Institute (MDPI)).
Abstract | Links | BibTeX | Tags: adult, Affective interaction, Arousal, artificial neural network, Cognitive state, Cognitive/emotional state, Collaborative interaction, computer, Convolutional neural networks, correlation coefficient, Deep learning, emotion, Emotional state, Emotions, female, Forecasting, Helmet mounted displays, human, Humans, Learning algorithms, Learning systems, Long short-term memory, Machine learning, Machine-learning, male, Mean square error, Neural networks, physiology, Regression, Root mean squared errors, Video recording, virtual reality, Visual feature, visual features
@article{joudeh_predicting_2024,
  title     = {Predicting the Arousal and Valence Values of Emotional States Using Learned, Predesigned, and Deep Visual Features †},
  author    = {Joudeh, I. O. and Cretu, A.-M. and Bouchard, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198382238&doi=10.3390%2fs24134398&partnerID=40&md5=cefa8b2e2c044d02f99662af350007db},
  doi       = {10.3390/s24134398},
  issn      = {1424-8220},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {Sensors},
  volume    = {24},
  number    = {13},
  publisher = {Multidisciplinary Digital Publishing Institute (MDPI)},
  abstract  = {The cognitive state of a person can be categorized using the circumplex model of emotional states, a continuous model of two dimensions: arousal and valence. The purpose of this research is to select a machine learning model(s) to be integrated into a virtual reality (VR) system that runs cognitive remediation exercises for people with mental health disorders. As such, the prediction of emotional states is essential to customize treatments for those individuals. We exploit the Remote Collaborative and Affective Interactions (RECOLA) database to predict arousal and valence values using machine learning techniques. RECOLA includes audio, video, and physiological recordings of interactions between human participants. To allow learners to focus on the most relevant data, features are extracted from raw data. Such features can be predesigned, learned, or extracted implicitly using deep learners. Our previous work on video recordings focused on predesigned and learned visual features. In this paper, we extend our work onto deep visual features. Our deep visual features are extracted using the MobileNet-v2 convolutional neural network (CNN) that we previously trained on RECOLA’s video frames of full/half faces. As the final purpose of our work is to integrate our solution into a practical VR application using head-mounted displays, we experimented with half faces as a proof of concept. The extracted deep features were then used to predict arousal and valence values via optimizable ensemble regression. We also fused the extracted visual features with the predesigned visual features and predicted arousal and valence values using the combined feature set. In an attempt to enhance our prediction performance, we further fused the predictions of the optimizable ensemble model with the predictions of the MobileNet-v2 model. 
After decision fusion, we achieved a root mean squared error (RMSE) of 0.1140, a Pearson’s correlation coefficient (PCC) of 0.8000, and a concordance correlation coefficient (CCC) of 0.7868 on arousal predictions. We achieved an RMSE of 0.0790, a PCC of 0.7904, and a CCC of 0.7645 on valence predictions. © 2024 by the authors.},
  note      = {Publisher: Multidisciplinary Digital Publishing Institute (MDPI)},
  keywords  = {adult, Affective interaction, Arousal, artificial neural network, Cognitive state, Cognitive/emotional state, Collaborative interaction, computer, Convolutional neural networks, correlation coefficient, Deep learning, emotion, Emotional state, Emotions, female, Forecasting, Helmet mounted displays, human, Humans, Learning algorithms, Learning systems, Long short-term memory, Machine learning, Machine-learning, male, Mean square error, Neural networks, physiology, Regression, Root mean squared errors, Video recording, virtual reality, Visual feature, visual features},
  pubstate  = {published},
  tppubtype = {article}
}
Joudeh, I. O.; Cretu, A. -M.; Bouchard, S.; Guimond, S.
Prediction of Continuous Emotional Measures through Physiological and Visual Data † Journal Article
In: Sensors, vol. 23, no. 12, pp. 17–21, 2023, ISSN: 14248220, (Publisher: Interactive Media Institute).
Abstract | Links | BibTeX | Tags: Affect recognition, Affective state, Arousal, Data-source, Deep learning, Electrocardiography, emotion, Emotion Recognition, Emotions, face recognition, Faces detection, Forecasting, human, Humans, Images processing, Learning systems, Machine learning, Machine-learning, mental disease, Mental Disorders, Physiological data, physiology, Signal-processing, Statistical tests, Video recording, Virtual-reality environment
@article{joudeh_prediction_2023-1,
  title     = {Prediction of Continuous Emotional Measures through Physiological and Visual Data †},
  author    = {Joudeh, I. O. and Cretu, A.-M. and Bouchard, S. and Guimond, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85163943735&doi=10.3390%2fs23125613&partnerID=40&md5=5e970f0d8c5790b85d8d77a9f3f52a2d},
  doi       = {10.3390/s23125613},
  issn      = {1424-8220},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Sensors},
  volume    = {23},
  number    = {12},
  pages     = {17--21},
  publisher = {MDPI},
  abstract  = {The affective state of a person can be measured using arousal and valence values. In this article, we contribute to the prediction of arousal and valence values from various data sources. Our goal is to later use such predictive models to adaptively adjust virtual reality (VR) environments and help facilitate cognitive remediation exercises for users with mental health disorders, such as schizophrenia, while avoiding discouragement. Building on our previous work on physiological, electrodermal activity (EDA) and electrocardiogram (ECG) recordings, we propose improving preprocessing and adding novel feature selection and decision fusion processes. We use video recordings as an additional data source for predicting affective states. We implement an innovative solution based on a combination of machine learning models alongside a series of preprocessing steps. We test our approach on RECOLA, a publicly available dataset. The best results are obtained with a concordance correlation coefficient (CCC) of 0.996 for arousal and 0.998 for valence using physiological data. Related work in the literature reported lower CCCs on the same data modality; thus, our approach outperforms the state-of-the-art approaches for RECOLA. Our study underscores the potential of using advanced machine learning techniques with diverse data sources to enhance the personalization of VR environments. © 2023 by the authors.},
  note      = {Publisher: Interactive Media Institute},
  keywords  = {Affect recognition, Affective state, Arousal, Data-source, Deep learning, Electrocardiography, emotion, Emotion Recognition, Emotions, face recognition, Faces detection, Forecasting, human, Humans, Images processing, Learning systems, Machine learning, Machine-learning, mental disease, Mental Disorders, Physiological data, physiology, Signal-processing, Statistical tests, Video recording, Virtual-reality environment},
  pubstate  = {published},
  tppubtype = {article}
}
Yapi, D.; Mejri, M.; Allili, M. S.; Baaziz, N.
A learning-based approach for automatic defect detection in textile images Proceedings Article
In: Zaremba, M.; Sasiadek, J.; Dolgui, A. (Eds.): IFAC-PapersOnLine, pp. 2423–2428, 2015, ISSN: 2405-8963, (Journal Abbreviation: IFAC-PapersOnLine).
Abstract | Links | BibTeX | Tags: Algorithms, Artificial intelligence, Automatic defect detections, Barium compounds, Bayes Classifier, Computational efficiency, Contourlets, Defect detection, Defect detection algorithm, Defects, Detection problems, Feature extraction, Feature extraction and classification, Gaussians, Image classification, Learning algorithms, Learning systems, Learning-based approach, Machine learning approaches, Mixture of generalized gaussians, Mixtures of generalized Gaussians (MoGG), Textile defect detection, Textile images, Textiles, Textures
@inproceedings{yapi_learning-based_2015,
  title     = {A learning-based approach for automatic defect detection in textile images},
  author    = {Yapi, D. and Mejri, M. and Allili, M. S. and Baaziz, N.},
  editor    = {Zaremba, M. and Sasiadek, J. and Dolgui, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84953865559&doi=10.1016%2fj.ifacol.2015.06.451&partnerID=40&md5=3dd0ef4c27cbd55700f6511af5f46772},
  doi       = {10.1016/j.ifacol.2015.06.451},
  issn      = {2405-8963},
  year      = {2015},
  date      = {2015-01-01},
  booktitle = {IFAC-PapersOnLine},
  volume    = {28},
  number    = {3},
  pages     = {2423--2428},
  abstract  = {This paper addresses the textile defect detection problem using a machine-learning approach. We propose a novel algorithm that uses supervised learning to classify textile textures in defect and non-defect classes based on suitable feature extraction and classification. We use statistical modeling of multi-scale contourlet image decomposition to obtain compact and accurate signatures for texture description. Our defect detection algorithm is based on two phases. In the first phase, using a training set of images, we extract reference defect-free signatures for each textile category. Then, we use the Bayes classifier (BC) to learn signatures of defected and non-defected classes. In the second phase, defects are detected on new images using the trained BC and an appropriate decomposition of images into blocks. Our algorithm has the capability to achieve highly accurate defect detection and localisation in textile textures while ensuring an efficient computational time. Compared to recent state-of-the-art methods, our algorithm has yielded better results on the standard TILDA database. © 2015, IFAC (International Federation of Automatic Control) Hosting by Elsevier Ltd. All rights reserved.},
  note      = {Journal Abbreviation: IFAC-PapersOnLine},
  keywords  = {Algorithms, Artificial intelligence, Automatic defect detections, Barium compounds, Bayes Classifier, Computational efficiency, Contourlets, Defect detection, Defect detection algorithm, Defects, Detection problems, Feature extraction, Feature extraction and classification, Gaussians, Image classification, Learning algorithms, Learning systems, Learning-based approach, Machine learning approaches, Mixture of generalized gaussians, Mixtures of generalized Gaussians (MoGG), Textile defect detection, Textile images, Textiles, Textures},
  pubstate  = {published},
  tppubtype = {inproceedings}
}



