

de Recherche et d’Innovation
en Cybersécurité et Société
Joudeh, I. O.; Cretu, A. -M.; Bouchard, S.; Guimond, S.
Prediction of Continuous Emotional Measures through Physiological and Visual Data † Article de journal
Dans: Sensors, vol. 23, no 12, 2023, ISSN: 14248220, (Publisher: MDPI).
Résumé | Liens | BibTeX | Étiquettes: Affect recognition, Affective state, Arousal, Data-source, Deep learning, Electrocardiography, emotion, Emotion Recognition, Emotions, face recognition, Faces detection, Forecasting, human, Humans, Images processing, Learning systems, Machine learning, Machine-learning, mental disease, Mental Disorders, Physiological data, physiology, Signal-processing, Statistical tests, Video recording, Virtual-reality environment
@article{joudeh_prediction_2023,
  title     = {Prediction of Continuous Emotional Measures through Physiological and Visual Data},
  author    = {Joudeh, I. O. and Cretu, A.-M. and Bouchard, S. and Guimond, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85163943735&doi=10.3390%2fs23125613&partnerID=40&md5=5e970f0d8c5790b85d8d77a9f3f52a2d},
  doi       = {10.3390/s23125613},
  issn      = {1424-8220},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Sensors},
  volume    = {23},
  number    = {12},
  publisher = {MDPI},
  abstract  = {The affective state of a person can be measured using arousal and valence values. In this article, we contribute to the prediction of arousal and valence values from various data sources. Our goal is to later use such predictive models to adaptively adjust virtual reality (VR) environments and help facilitate cognitive remediation exercises for users with mental health disorders, such as schizophrenia, while avoiding discouragement. Building on our previous work on physiological, electrodermal activity (EDA) and electrocardiogram (ECG) recordings, we propose improving preprocessing and adding novel feature selection and decision fusion processes. We use video recordings as an additional data source for predicting affective states. We implement an innovative solution based on a combination of machine learning models alongside a series of preprocessing steps. We test our approach on RECOLA, a publicly available dataset. The best results are obtained with a concordance correlation coefficient (CCC) of 0.996 for arousal and 0.998 for valence using physiological data. Related work in the literature reported lower CCCs on the same data modality; thus, our approach outperforms the state-of-the-art approaches for RECOLA. Our study underscores the potential of using advanced machine learning techniques with diverse data sources to enhance the personalization of VR environments. © 2023 by the authors.},
  keywords  = {Affect recognition, Affective state, Arousal, Data-source, Deep learning, Electrocardiography, emotion, Emotion Recognition, Emotions, face recognition, Faces detection, Forecasting, human, Humans, Images processing, Learning systems, Machine learning, Machine-learning, mental disease, Mental Disorders, Physiological data, physiology, Signal-processing, Statistical tests, Video recording, Virtual-reality environment},
  pubstate  = {published},
  tppubtype = {article}
}
Royer, J.; Blais, C.; Barnabé-Lortie, V.; Carré, M.; Leclerc, J.; Fiset, D.
Efficient visual information for unfamiliar face matching despite viewpoint variations: It's not in the eyes! Article de journal
Dans: Vision Research, vol. 123, p. 33–40, 2016, ISSN: 00426989 (ISSN), (Publisher: Elsevier Ltd).
Résumé | Liens | BibTeX | Étiquettes: accuracy, adult, article, association, attention, Bubbles, Evoked Potentials, eye fixation, Face, face profile, face recognition, Facial Recognition, facies, female, Fixation, human, human experiment, Humans, Image analysis, Individual differences, male, Ocular, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, procedures, Psychophysics, recognition, Recognition (Psychology), regression analysis, task performance, unfamiliar face matching, viewpoint variation, Viewpoint variations, Visual, visual discrimination, visual evoked potential, visual information, visual memory, visual stimulation, visual system parameters, Young Adult
@article{royer_efficient_2016,
  title     = {Efficient visual information for unfamiliar face matching despite viewpoint variations: It's not in the eyes!},
  author    = {Royer, J. and Blais, C. and Barnabé-Lortie, V. and Carré, M. and Leclerc, J. and Fiset, D.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84968779426&doi=10.1016%2fj.visres.2016.04.004&partnerID=40&md5=4c63f6eea279f7322c9af23ae9ed22c1},
  doi       = {10.1016/j.visres.2016.04.004},
  issn      = {0042-6989},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Vision Research},
  volume    = {123},
  pages     = {33--40},
  publisher = {Elsevier Ltd},
  abstract  = {Faces are encountered in highly diverse angles in real-world settings. Despite this considerable diversity, most individuals are able to easily recognize familiar faces. The vast majority of studies in the field of face recognition have nonetheless focused almost exclusively on frontal views of faces. Indeed, a number of authors have investigated the diagnostic facial features for the recognition of frontal views of faces previously encoded in this same view. However, the nature of the information useful for identity matching when the encoded face and test face differ in viewing angle remains mostly unexplored. The present study addresses this issue using individual differences and bubbles, a method that pinpoints the facial features effectively used in a visual categorization task. Our results indicate that the use of features located in the center of the face, the lower left portion of the nose area and the center of the mouth, are significantly associated with individual efficiency to generalize a face's identity across different viewpoints. However, as faces become more familiar, the reliance on this area decreases, while the diagnosticity of the eye region increases. This suggests that a certain distinction can be made between the visual mechanisms subtending viewpoint invariance and face recognition in the case of unfamiliar face identification. Our results further support the idea that the eye area may only come into play when the face stimulus is particularly familiar to the observer. © 2016 Elsevier Ltd.},
  keywords  = {accuracy, adult, article, association, attention, Bubbles, Evoked Potentials, eye fixation, Face, face profile, face recognition, Facial Recognition, facies, female, Fixation, human, human experiment, Humans, Image analysis, Individual differences, male, Ocular, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, procedures, Psychophysics, recognition, Recognition (Psychology), regression analysis, task performance, unfamiliar face matching, viewpoint variation, Viewpoint variations, Visual, visual discrimination, visual evoked potential, visual information, visual memory, visual stimulation, visual system parameters, Young Adult},
  pubstate  = {published},
  tppubtype = {article}
}
Allili, M. S.; Baaziz, N.; Mejri, M.
Texture modeling using contourlets and finite mixtures of generalized gaussian distributions and applications Article de journal
Dans: IEEE Transactions on Multimedia, vol. 16, no 3, p. 772–784, 2014, ISSN: 15209210, (Publisher: Institute of Electrical and Electronics Engineers Inc.).
Résumé | Liens | BibTeX | Étiquettes: Contourlet coefficients, Contourlet transform, Defects, Directional information, Fabric texture, face recognition, Generalized Gaussian Distributions, Inspection, Mixtures, Probability density function, Probability density functions (PDFs), State-of-the-art methods, Texture retrieval, Textures
@article{allili_texture_2014,
  title     = {Texture modeling using contourlets and finite mixtures of generalized {Gaussian} distributions and applications},
  author    = {Allili, M. S. and Baaziz, N. and Mejri, M.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84896903467&doi=10.1109%2fTMM.2014.2298832&partnerID=40&md5=16b2fa741e71e1e581f6b0f54c43a676},
  doi       = {10.1109/TMM.2014.2298832},
  issn      = {1520-9210},
  year      = {2014},
  date      = {2014-01-01},
  journal   = {IEEE Transactions on Multimedia},
  volume    = {16},
  number    = {3},
  pages     = {772--784},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  abstract  = {In this paper, we develop a new framework for contourlet-based statistical modeling using finite Mixtures of Generalized Gaussian distributions (MoGG). On the one hand, given the rich directional information provided by the contourlet transform (CT), we propose to use a redundant version of the CT, which describes texture structures more accurately. On the other hand, we use MoGG modeling of contourlet coefficients distribution, which allows for precise capturing of a wide range of histogram shapes and provides better description and discrimination of texture than single probability density functions (pdfs). Moreover, we propose three applications for the proposed approach, namely: (1) texture retrieval, (2) fabric texture defect detection, and 3) infrared (IR) face recognition. We compare two implementations of the CT: standard CT (SCT) and redundant CT (RCT). We show that the proposed approach yields better results in the applications studied compared to recent state-of-the-art methods. © 2014 IEEE.},
  keywords  = {Contourlet coefficients, Contourlet transform, Defects, Directional information, Fabric texture, face recognition, Generalized Gaussian Distributions, Inspection, Mixtures, Probability density function, Probability density functions (PDFs), State-of-the-art methods, Texture retrieval, Textures},
  pubstate  = {published},
  tppubtype = {article}
}
Blais, C.; Jack, R. E.; Scheepers, C.; Fiset, D.; Caldara, R.
Culture shapes how we look at faces Article de journal
Dans: PLoS ONE, vol. 3, no 8, 2008, ISSN: 19326203 (ISSN).
Résumé | Liens | BibTeX | Étiquettes: adult, article, Asian, Asian Continental Ancestry Group, Caucasian, Classification, Cross-Cultural Comparison, cultural anthropology, cultural factor, Culture, East Asian, European Continental Ancestry Group, Eye, eye fixation, eye movement, Eye movements, Face, face asymmetry, face recognition, female, Fixation, histology, human, human experiment, Humans, Learning, male, methodology, Mouth, normal human, Nose, observer variation, Ocular, physiology, race difference, recognition, Recognition (Psychology), vision, visual memory, Visual Perception
@article{blais_culture_2008,
  title     = {Culture shapes how we look at faces},
  author    = {Blais, C. and Jack, R. E. and Scheepers, C. and Fiset, D. and Caldara, R.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-51549087752&doi=10.1371%2fjournal.pone.0003022&partnerID=40&md5=e75dcf9792dbd03fd1ef5894b81bfc4f},
  doi       = {10.1371/journal.pone.0003022},
  issn      = {1932-6203},
  year      = {2008},
  date      = {2008-01-01},
  journal   = {PLoS ONE},
  volume    = {3},
  number    = {8},
  abstract  = {Background: Face processing, amongst many basic visual skills, is thought to be invariant across all humans. From as early as 1965, studies of eye movements have consistently revealed a systematic triangular sequence of fixations over the eyes and the mouth, suggesting that faces elicit a universal, biologically-determined information extraction pattern. Methodology/Principal Findings: Here we monitored the eye movements of Western Caucasian and East Asian observers while they learned, recognized, and categorized by race Western Caucasian and East Asian faces. Western Caucasian observers reproduced a scattered triangular pattern of fixations for faces of both races and across tasks. Contrary to intuition, East Asian observers focused more on the central region of the face. Conclusions/Significance: These results demonstrate that face processing can no longer be considered as arising from a universal series of perceptual events. The strategy employed to extract visual information from faces differs across cultures. © 2008 Blais et al.},
  keywords  = {adult, article, Asian, Asian Continental Ancestry Group, Caucasian, Classification, Cross-Cultural Comparison, cultural anthropology, cultural factor, Culture, East Asian, European Continental Ancestry Group, Eye, eye fixation, eye movement, Eye movements, Face, face asymmetry, face recognition, female, Fixation, histology, human, human experiment, Humans, Learning, male, methodology, Mouth, normal human, Nose, observer variation, Ocular, physiology, race difference, recognition, Recognition (Psychology), vision, visual memory, Visual Perception},
  pubstate  = {published},
  tppubtype = {article}
}