

de Recherche et d’Innovation
en Cybersécurité et Société
Côté, L.; Lamontagne, J.; Bellerose, A.; Blais, C.; Fiset, D.
The eyes are central to face detection: revisiting the foundations of face processing Article de journal
Dans: Vision Research, vol. 243, 2026, ISSN: 0042-6989.
Résumé | Liens | BibTeX | Étiquettes: adult, article, Black person, Bubbles, Categorization, Caucasian, Detection, emotion assessment, Faces, Facial Recognition, facies, female, human, human experiment, Image analysis, information processing, Information use, male, Noise, normal human, perception, Prosopagnosia, spatial frequency discrimination, task performance, visual discrimination, Young Adult
@article{cote_eyes_2026,
  title     = {The eyes are central to face detection: revisiting the foundations of face processing},
  author    = {Côté, L. and Lamontagne, J. and Bellerose, A. and Blais, C. and Fiset, D.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105030389147&doi=10.1016%2Fj.visres.2026.108785&partnerID=40&md5=752aa5d9923ac60539e36118ad41e1e6},
  doi       = {10.1016/j.visres.2026.108785},
  issn      = {0042-6989},
  year      = {2026},
  date      = {2026-01-01},
  journal   = {Vision Research},
  volume    = {243},
  abstract  = {Face detection feels effortless, yet it requires finely tuned computations to extract socially meaningful signals from the visual stream. Here, we used the Bubbles method to isolate the facial features and spatial frequency information that support face categorization. Across three experiments varying in task demands and visual context, the eye region consistently emerged as the most diagnostic source of information, particularly in high spatial frequencies. This finding held whether participants distinguished faces from noise, from non-face objects, or from real-world categories—suggesting that the eyes serve as an anchor point for categorization across contexts. Strikingly, this diagnostic profile mirrors that found in face identification tasks, implying that detection and recognition may rely on shared perceptual mechanisms rather than sequential, independent processes. This overlap sheds light on longstanding ambiguities in the prosopagnosia literature, indicating that detection impairments found in patients may stem from a broader failure to extract critical eye information. More broadly, our results invite a rethinking of the early stages of face processing, suggesting that detection already involves selective use of diagnostic facial features that supports recognition, emotional decoding, and social perception. © 2026 The Author(s).},
  keywords  = {adult, article, Black person, Bubbles, Categorization, Caucasian, Detection, emotion assessment, Faces, Facial Recognition, facies, female, human, human experiment, Image analysis, information processing, Information use, male, Noise, normal human, perception, Prosopagnosia, spatial frequency discrimination, task performance, visual discrimination, Young Adult},
  pubstate  = {published},
  tppubtype = {article}
}
Zetout, A.; Allili, M. S.
CSDNet: Context-Aware Segmentation of Disaster Aerial Imagery Using Detection-Guided Features and Lightweight Transformers Article de journal
Dans: Remote Sensing, vol. 17, no 14, 2025, ISSN: 2072-4292.
Résumé | Liens | BibTeX | Étiquettes: Aerial imagery, Affected area, Antennas, Class imbalance, Context-Aware, Contextual semantic segmentation, Contextual semantics, Detection, disaster response, Disaster-response, Emergency services, Error detection, Feature extraction, Lightweight model, Semantic segmentation, Semantics
@article{zetout_csdnet_2025,
  title     = {{CSDNet}: Context-Aware Segmentation of Disaster Aerial Imagery Using Detection-Guided Features and Lightweight Transformers},
  author    = {Zetout, A. and Allili, M. S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011677142&doi=10.3390%2Frs17142337&partnerID=40&md5=a83db334b208d065476e0026ad0ee416},
  doi       = {10.3390/rs17142337},
  issn      = {2072-4292},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Remote Sensing},
  volume    = {17},
  number    = {14},
  abstract  = {Accurate multi-class semantic segmentation of disaster-affected areas is essential for rapid response and effective recovery planning. We present CSDNet, a context-aware segmentation model tailored to disaster scene scenarios, designed to improve segmentation of both large-scale disaster zones and small, underrepresented classes. The architecture combines a lightweight transformer module for global context modeling with depthwise separable convolutions (DWSCs) to enhance efficiency without compromising representational capacity. Additionally, we introduce a detection-guided feature fusion mechanism that integrates outputs from auxiliary detection tasks to mitigate class imbalance and improve discrimination of visually similar categories. Extensive experiments on several public datasets demonstrate that our model significantly improves segmentation of both man-made infrastructure and natural damage-related features, offering a robust and efficient solution for post-disaster analysis. © 2025 by the authors.},
  keywords  = {Aerial imagery, Affected area, Antennas, Class imbalance, Context-Aware, Contextual semantic segmentation, Contextual semantics, Detection, disaster response, Disaster-response, Emergency services, Error detection, Feature extraction, Lightweight model, Semantic segmentation, Semantics},
  pubstate  = {published},
  tppubtype = {article}
}
Amirkhani, D.; Allili, M. S.; Hebbache, L.; Hammouche, N.; Lapointe, J.
Visual Concrete Bridge Defect Classification and Detection Using Deep Learning: A Systematic Review Article de journal
Dans: IEEE Transactions on Intelligent Transportation Systems, p. 1–23, 2024, ISSN: 1524-9050, (Publisher: Institute of Electrical and Electronics Engineers Inc.).
Résumé | Liens | BibTeX | Étiquettes: Annotation, Annotations, Bridges, Classification, Concrete, Concrete bridge defect, Concrete bridge defects, Concrete bridges, Concrete defects, Concretes, Deep learning, Defect classification, Defect detection, Defects, Detection, Inspection, Reviews, Segmentation, Taxonomies, Visualization
@article{amirkhani_visual_2024,
  title     = {Visual Concrete Bridge Defect Classification and Detection Using Deep Learning: A Systematic Review},
  author    = {Amirkhani, D. and Allili, M. S. and Hebbache, L. and Hammouche, N. and Lapointe, J.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85186994244&doi=10.1109%2fTITS.2024.3365296&partnerID=40&md5=a9228252d620ad6d444cc395296ebac2},
  doi       = {10.1109/TITS.2024.3365296},
  issn      = {1524-9050},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {IEEE Transactions on Intelligent Transportation Systems},
  pages     = {1--23},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Visual inspection is an important process for maintaining bridges in road transportation systems, and preventing catastrophic events and tragedies. In this process, accurate and automatic concrete defect classification and detection are major components to ensure early identification of any issue that can compromise the bridge safety and integrity. While a tremendous body of research has been proposed in the last decades for addressing these problems, the advent of deep learning unleashed huge opportunities for building more accurate and efficient methods. Our aim in this survey is to study the recent progress of vision-based concrete bridge defect classification and detection in the deep learning era. Our review encompasses major aspects underlying typical frameworks, which include concrete defect taxonomy, public datasets and evaluation metrics. We provide also a taxonomy of deep-learning-based classification and detection algorithms with a detailed discussion of their advantages and limitations. We also benchmark baseline models for classification and detection, using two popular datasets. We finally discuss important challenges of concrete defect classification and detection, and promising research avenues to build better models and integrate them in real-world visual inspection systems, which warrant further scientific investigation. IEEE},
  note      = {Publisher: Institute of Electrical and Electronics Engineers Inc.},
  keywords  = {Annotation, Annotations, Bridges, Classification, Concrete, Concrete bridge defect, Concrete bridge defects, Concrete bridges, Concrete defects, Concretes, Deep learning, Defect classification, Defect detection, Defects, Detection, Inspection, Reviews, Segmentation, Taxonomies, Visualization},
  pubstate  = {published},
  tppubtype = {article}
}



