

de Recherche et d’Innovation
en Cybersécurité et Société
Amirkhani, D.; Allili, M. S.; Hebbache, L.; Hammouche, N.; Lapointe, J.
Visual Concrete Bridge Defect Classification and Detection Using Deep Learning: A Systematic Review Journal Article
In: IEEE Transactions on Intelligent Transportation Systems, pp. 1–23, 2024, ISSN: 15249050, (Publisher: Institute of Electrical and Electronics Engineers Inc.).
Abstract | Links | BibTeX | Tags: Annotation, Annotations, Bridges, Classification, Concrete, Concrete bridge defect, Concrete bridge defects, Concrete bridges, Concrete defects, Concretes, Deep learning, Defect classification, Defect detection, Defects, Detection, Inspection, Reviews, Segmentation, Taxonomies, Visualization
@article{amirkhani_visual_2024,
  title     = {Visual Concrete Bridge Defect Classification and Detection Using Deep Learning: A Systematic Review},
  author    = {Amirkhani, D. and Allili, M. S. and Hebbache, L. and Hammouche, N. and Lapointe, J.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85186994244&doi=10.1109%2fTITS.2024.3365296&partnerID=40&md5=a9228252d620ad6d444cc395296ebac2},
  doi       = {10.1109/TITS.2024.3365296},
  issn      = {1524-9050},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {IEEE Transactions on Intelligent Transportation Systems},
  pages     = {1--23},
  abstract  = {Visual inspection is an important process for maintaining bridges in road transportation systems, and preventing catastrophic events and tragedies. In this process, accurate and automatic concrete defect classification and detection are major components to ensure early identification of any issue that can compromise the bridge safety and integrity. While a tremendous body of research has been proposed in the last decades for addressing these problems, the advent of deep learning unleashed huge opportunities for building more accurate and efficient methods. Our aim in this survey is to study the recent progress of vision-based concrete bridge defect classification and detection in the deep learning era. Our review encompasses major aspects underlying typical frameworks, which include concrete defect taxonomy, public datasets and evaluation metrics. We provide also a taxonomy of deep-learning-based classification and detection algorithms with a detailed discussion of their advantages and limitations. We also benchmark baseline models for classification and detection, using two popular datasets. We finally discuss important challenges of concrete defect classification and detection, and promising research avenues to build better models and integrate them in real-world visual inspection systems, which warrant further scientific investigation. IEEE},
  note      = {Publisher: Institute of Electrical and Electronics Engineers Inc.},
  keywords  = {Annotation, Annotations, Bridges, Classification, Concrete, Concrete bridge defect, Concrete bridge defects, Concrete bridges, Concrete defects, Concretes, Deep learning, Defect classification, Defect detection, Defects, Detection, Inspection, Reviews, Segmentation, Taxonomies, Visualization},
  pubstate  = {published},
  tppubtype = {article}
}
Charbonneau, I.; Guérette, J.; Cormier, S.; Blais, C.; Lalonde-Beaudoin, G.; Smith, F. W.; Fiset, D.
The role of spatial frequencies for facial pain categorization Journal Article
In: Scientific Reports, vol. 11, no. 1, 2021, ISSN: 20452322, (Publisher: Nature Research).
Abstract | Links | BibTeX | Tags: Adolescent, adult, Classification, Distance Perception, emotion, Emotions, Face, face pain, Facial Expression, Facial Pain, Facial Recognition, female, human, Humans, Knowledge, male, Normal Distribution, Pattern Recognition, procedures, psychology, Psychophysics, recognition, reproducibility, Reproducibility of Results, Visual, Young Adult
@article{charbonneau_role_2021,
  title     = {The role of spatial frequencies for facial pain categorization},
  author    = {Charbonneau, I. and Guérette, J. and Cormier, S. and Blais, C. and Lalonde-Beaudoin, G. and Smith, F. W. and Fiset, D.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85111138273&doi=10.1038%2fs41598-021-93776-7&partnerID=40&md5=d759d0218de65fce371bb51d7f2593d8},
  doi       = {10.1038/s41598-021-93776-7},
  issn      = {2045-2322},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Scientific Reports},
  volume    = {11},
  number    = {1},
  abstract  = {Studies on low-level visual information underlying pain categorization have led to inconsistent findings. Some show an advantage for low spatial frequency information (SFs) and others a preponderance of mid SFs. This study aims to clarify this gap in knowledge since these results have different theoretical and practical implications, such as how far away an observer can be in order to categorize pain. This study addresses this question by using two complementary methods: a data-driven method without a priori expectations about the most useful SFs for pain recognition and a more ecological method that simulates the distance of stimuli presentation. We reveal a broad range of important SFs for pain recognition starting from low to relatively high SFs and showed that performance is optimal in a short to medium distance (1.2–4.8 m) but declines significantly when mid SFs are no longer available. This study reconciles previous results that show an advantage of LSFs over HSFs when using arbitrary cutoffs, but above all reveal the prominent role of mid-SFs for pain recognition across two complementary experimental tasks. © 2021, The Author(s).},
  note      = {Publisher: Nature Research},
  keywords  = {Adolescent, adult, Classification, Distance Perception, emotion, Emotions, Face, face pain, Facial Expression, Facial Pain, Facial Recognition, female, human, Humans, Knowledge, male, Normal Distribution, Pattern Recognition, procedures, psychology, Psychophysics, recognition, reproducibility, Reproducibility of Results, Visual, Young Adult},
  pubstate  = {published},
  tppubtype = {article}
}
Cote, S. S. -P.; Paquette, G. R.; Neveu, S. -M.; Chartier, S.; Labbe, D. R.; Renaud, P.
Combining electroencephalography with plethysmography for classification of deviant sexual preferences. Proceedings Article
In: Proceedings - 9th International Workshop on Biometrics and Forensics, IWBF 2021, Institute of Electrical and Electronics Engineers Inc., 2021, ISBN: 978-172819556-8 (ISBN), (Journal Abbreviation: Proc. - Int. Workshop Biom. Forensics, IWBF).
Abstract | Links | BibTeX | Tags: Biometrics, Classification, Classification (of information), Decision trees, Deviant sexual preferences, Dimensionality reduction, Electroencephalography, Electrophysiology, extraction, Extraction method, Machine learning, Plethysmography, Proof of concept, Psychophysiological measures, Standard protocols, Variable selection and extraction
@inproceedings{cote_combining_2021,
  title     = {Combining electroencephalography with plethysmography for classification of deviant sexual preferences},
  author    = {Cote, S. S.-P. and Paquette, G. R. and Neveu, S.-M. and Chartier, S. and Labbe, D. R. and Renaud, P.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85113855965&doi=10.1109%2fIWBF50991.2021.9465078&partnerID=40&md5=b545b2a6d22e32115ac179399188960e},
  doi       = {10.1109/IWBF50991.2021.9465078},
  isbn      = {978-1-7281-9556-8},
  year      = {2021},
  date      = {2021-01-01},
  booktitle = {Proceedings - 9th International Workshop on Biometrics and Forensics, IWBF 2021},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {Evaluating sexual preferences is a difficult task. Past research relied mostly on penile plethysmography (PPG). Even though this technique is the standard protocol used in most current forensic settings, its usage showed mixed results. One way to improve PPG is the addition of other psychophysiological measures such as electroencephalography (EEG). However, EEG generates significant amount of data that hinders classification. Machine learning (ML) is nowadays an excellent tool to identify most discriminating variables and for classification. Therefore, it is proposed to use ML selection and extraction methods for dimensionality reduction and then to classify sexual preferences. Evidence from this proof of concept shows that using EEG and PPG together leads to better classification (85.6%) than using EEG (82.2%) or PPG individually (74.4%). The Random Forest (RF) classifier combined with the Principal Component Analysis (PCA) extraction method achieves a slightly higher general performance rate. This increase in performances opens the door for using more reliable biometric measures in the assessment of deviant sexual preferences. © 2021 IEEE.},
  note      = {Journal Abbreviation: Proc. - Int. Workshop Biom. Forensics, IWBF},
  keywords  = {Biometrics, Classification, Classification (of information), Decision trees, Deviant sexual preferences, Dimensionality reduction, Electroencephalography, Electrophysiology, extraction, Extraction method, Machine learning, Plethysmography, Proof of concept, Psychophysiological measures, Standard protocols, Variable selection and extraction},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Saidani, N.; Adi, K.; Allili, M. S.
A semantic-based classification approach for an enhanced spam detection Journal Article
In: Computers and Security, vol. 94, 2020, ISSN: 01674048 (ISSN), (Publisher: Elsevier Ltd).
Abstract | Links | BibTeX | Tags: Classification, Classification approach, Conceptual views, Domain-specific analysis, Electronic mail, Email content, Multilevel analysis, Semantic analysis, Semantic content, Semantic features, Semantic levels, Semantics, Spam detection
@article{saidani_semantic-based_2020,
  title     = {A semantic-based classification approach for an enhanced spam detection},
  author    = {Saidani, N. and Adi, K. and Allili, M. S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85084283123&doi=10.1016%2fj.cose.2020.101716&partnerID=40&md5=539ac0fc0a7144fe983f514175a138e2},
  doi       = {10.1016/j.cose.2020.101716},
  issn      = {0167-4048},
  year      = {2020},
  date      = {2020-01-01},
  journal   = {Computers and Security},
  volume    = {94},
  abstract  = {In this paper, we explore the use of a text semantic analysis to improve the accuracy of spam detection. We propose a method based on two semantic level analysis. In the first level, we categorize emails by specific domains (e.g., Health, Education, Finance, etc.) to enable a separate conceptual view for spams in each domain. In the second level, we combine a set of manually-specified and automatically-extracted semantic features for spam detection in each domain. These features are meant to summarize the email content into compact topics discriminating spam from non-spam emails in an efficient way. We show that the proposed method enables a better spam detection compared to existing methods based on bag-of-words (BoW) and semantic content, and leads to more interpretable results. © 2020},
  note      = {Publisher: Elsevier Ltd},
  keywords  = {Classification, Classification approach, Conceptual views, Domain-specific analysis, Electronic mail, Email content, Multilevel analysis, Semantic analysis, Semantic content, Semantic features, Semantic levels, Semantics, Spam detection},
  pubstate  = {published},
  tppubtype = {article}
}
Ouyed, O.; Allili, M. S.
Feature weighting for multinomial kernel logistic regression and application to action recognition Journal Article
In: Neurocomputing, vol. 275, pp. 1752–1768, 2018, ISSN: 09252312, (Publisher: Elsevier B.V.).
Abstract | Links | BibTeX | Tags: Action recognition, article, Classification, classification algorithm, Classification performance, Computer applications, controlled study, embedding, Feature relevance, feature relevance for multinomial kernel logistic regression, Feature weighting, Kernel logistic regression, kernel method, Learning, mathematical computing, Multinomial kernels, multinominal kernel logistic regression, Neural networks, priority journal, recognition, regression analysis, simulation, sparse modeling, Sparse models, sparse multinomial logistic regression, sparsity promoting regularization, standard, Supervised classification
@article{ouyed_feature_2018,
  title     = {Feature weighting for multinomial kernel logistic regression and application to action recognition},
  author    = {Ouyed, O. and Allili, M. S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85035104467&doi=10.1016%2fj.neucom.2017.10.024&partnerID=40&md5=09687b392a405be4338799a750932cf3},
  doi       = {10.1016/j.neucom.2017.10.024},
  issn      = {0925-2312},
  year      = {2018},
  date      = {2018-01-01},
  journal   = {Neurocomputing},
  volume    = {275},
  pages     = {1752--1768},
  abstract  = {Multinominal kernel logistic regression (MKLR) is a supervised classification method designed for separating classes with non-linear boundaries. However, it relies on the assumption that all features are equally important, which may decrease classification performance when dealing with high-dimensional and noisy data. We propose an approach for embedding feature relevance in multinomial kernel logistic regression. Our approach, coined fr-MKLR, generalizes MKLR by introducing a feature weighting scheme in the Gaussian kernel and using the so-called ℓ0-“norm” as sparsity-promoting regularization. Therefore, the contribution of each feature is tuned according to its relevance for classification which leads to more generalizable and interpretable sparse models for classification. Application of our approach to several standard datasets and video action recognition has provided very promising results compared to other methods. © 2017 Elsevier B.V.},
  note      = {Publisher: Elsevier B.V.},
  keywords  = {Action recognition, article, Classification, classification algorithm, Classification performance, Computer applications, controlled study, embedding, Feature relevance, feature relevance for multinomial kernel logistic regression, Feature weighting, Kernel logistic regression, kernel method, Learning, mathematical computing, Multinomial kernels, multinominal kernel logistic regression, Neural networks, priority journal, recognition, regression analysis, simulation, sparse modeling, Sparse models, sparse multinomial logistic regression, sparsity promoting regularization, standard, Supervised classification},
  pubstate  = {published},
  tppubtype = {article}
}
Roy, C.; Blais, C.; Fiset, D.; Rainville, P.; Gosselin, F.
Efficient information for recognizing pain in facial expressions Journal Article
In: European Journal of Pain (United Kingdom), vol. 19, no. 6, pp. 852–860, 2015, ISSN: 10903801 (ISSN).
Abstract | Links | BibTeX | Tags: anger, article, association, Classification, Cues, disgust, emotion, Emotions, Facial Expression, Fear, female, happiness, human, human experiment, Humans, male, nociception, normal human, Pain, pain assessment, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, procedures, random sample, reproducibility, Reproducibility of Results, sadness, statistical significance, Visual, visual information, visual stimulation
@article{roy_efficient_2015,
  title     = {Efficient information for recognizing pain in facial expressions},
  author    = {Roy, C. and Blais, C. and Fiset, D. and Rainville, P. and Gosselin, F.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929122739&doi=10.1002%2fejp.676&partnerID=40&md5=027f6da7b6d5c98c86de6a07766fb83d},
  doi       = {10.1002/ejp.676},
  issn      = {1090-3801},
  year      = {2015},
  date      = {2015-01-01},
  journal   = {European Journal of Pain (United Kingdom)},
  volume    = {19},
  number    = {6},
  pages     = {852--860},
  abstract  = {Background The face as a visual stimulus is a reliable source of information for judging the pain experienced by others. Until now, most studies investigating the facial expression of pain have used a descriptive method (i.e. Facial Action Coding System). However, the facial features that are relevant for the observer in the identification of the expression of pain remain largely unknown despite the strong medical impact that misjudging pain can have on patients' well-being. Methods Here, we investigated this question by applying the Bubbles method. Fifty healthy volunteers were asked to categorize facial expressions (the six basic emotions, pain and neutrality) displayed in stimuli obtained from a previously validated set and presented for 500 ms each. To determine the critical areas of the face used in this categorization task, the faces were partly masked based on random sampling of regions of the stimuli at different spatial frequency ranges. Results Results show that accurate pain discrimination relies mostly on the frown lines and the mouth. Finally, an ideal observer analysis indicated that the use of the frown lines in human observers could not be attributed to the objective 'informativeness' of this area. Conclusions Based on a recent study suggesting that this area codes for the affective dimension of pain, we propose that the visual system has evolved to focus primarily on the facial cues that signal the aversiveness of pain, consistent with the social role of facial expressions in the communication of potential threats. © 2015 European Pain Federation-EFIC®.},
  keywords  = {anger, article, association, Classification, Cues, disgust, emotion, Emotions, Facial Expression, Fear, female, happiness, human, human experiment, Humans, male, nociception, normal human, Pain, pain assessment, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, procedures, random sample, reproducibility, Reproducibility of Results, sadness, statistical significance, Visual, visual information, visual stimulation},
  pubstate  = {published},
  tppubtype = {article}
}
Blais, C.; Jack, R. E.; Scheepers, C.; Fiset, D.; Caldara, R.
Culture shapes how we look at faces Journal Article
In: PLoS ONE, vol. 3, no. 8, 2008, ISSN: 19326203 (ISSN).
Abstract | Links | BibTeX | Tags: adult, article, Asian, Asian Continental Ancestry Group, Caucasian, Classification, Cross-Cultural Comparison, cultural anthropology, cultural factor, Culture, East Asian, European Continental Ancestry Group, Eye, eye fixation, eye movement, Eye movements, Face, face asymmetry, face recognition, female, Fixation, histology, human, human experiment, Humans, Learning, male, methodology, Mouth, normal human, Nose, observer variation, Ocular, physiology, race difference, recognition, Recognition (Psychology), vision, visual memory, Visual Perception
@article{blais_culture_2008,
  title     = {Culture shapes how we look at faces},
  author    = {Blais, C. and Jack, R. E. and Scheepers, C. and Fiset, D. and Caldara, R.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-51549087752&doi=10.1371%2fjournal.pone.0003022&partnerID=40&md5=e75dcf9792dbd03fd1ef5894b81bfc4f},
  doi       = {10.1371/journal.pone.0003022},
  issn      = {1932-6203},
  year      = {2008},
  date      = {2008-01-01},
  journal   = {PLoS ONE},
  volume    = {3},
  number    = {8},
  abstract  = {Background: Face processing, amongst many basic visual skills, is thought to be invariant across all humans. From as early as 1965, studies of eye movements have consistently revealed a systematic triangular sequence of fixations over the eyes and the mouth, suggesting that faces elicit a universal, biologically-determined information extraction pattern. Methodology/Principal Findings: Here we monitored the eye movements of Western Caucasian and East Asian observers while they learned, recognized, and categorized by race Western Caucasian and East Asian faces. Western Caucasian observers reproduced a scattered triangular pattern of fixations for faces of both races and across tasks. Contrary to intuition, East Asian observers focused more on the central region of the face. Conclusions/Significance: These results demonstrate that face processing can no longer be considered as arising from a universal series of perceptual events. The strategy employed to extract visual information from faces differs across cultures. © 2008 Blais et al.},
  keywords  = {adult, article, Asian, Asian Continental Ancestry Group, Caucasian, Classification, Cross-Cultural Comparison, cultural anthropology, cultural factor, Culture, East Asian, European Continental Ancestry Group, Eye, eye fixation, eye movement, Eye movements, Face, face asymmetry, face recognition, female, Fixation, histology, human, human experiment, Humans, Learning, male, methodology, Mouth, normal human, Nose, observer variation, Ocular, physiology, race difference, recognition, Recognition (Psychology), vision, visual memory, Visual Perception},
  pubstate  = {published},
  tppubtype = {article}
}