

de Recherche et d’Innovation
en Cybersécurité et Société
Joudeh, I. O.; Cretu, A. -M.; Bouchard, S.
Predicting the Arousal and Valence Values of Emotional States Using Learned, Predesigned, and Deep Visual Features † Article de journal
Dans: Sensors, vol. 24, no 13, 2024, ISSN: 14248220 (ISSN), (Publisher: Multidisciplinary Digital Publishing Institute (MDPI)).
Résumé | Liens | BibTeX | Étiquettes: adult, Affective interaction, Arousal, artificial neural network, Cognitive state, Cognitive/emotional state, Collaborative interaction, computer, Convolutional neural networks, correlation coefficient, Deep learning, emotion, Emotional state, Emotions, female, Forecasting, Helmet mounted displays, human, Humans, Learning algorithms, Learning systems, Long short-term memory, Machine learning, Machine-learning, male, Mean square error, Neural networks, physiology, Regression, Root mean squared errors, Video recording, virtual reality, Visual feature, visual features
@article{joudeh_predicting_2024,
  title     = {Predicting the Arousal and Valence Values of Emotional States Using Learned, Predesigned, and Deep Visual Features},
  author    = {Joudeh, I. O. and Cretu, A.-M. and Bouchard, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85198382238&doi=10.3390%2fs24134398&partnerID=40&md5=cefa8b2e2c044d02f99662af350007db},
  doi       = {10.3390/s24134398},
  issn      = {1424-8220},
  year      = {2024},
  date      = {2024-01-01},
  journal   = {Sensors},
  volume    = {24},
  number    = {13},
  abstract  = {The cognitive state of a person can be categorized using the circumplex model of emotional states, a continuous model of two dimensions: arousal and valence. The purpose of this research is to select a machine learning model(s) to be integrated into a virtual reality (VR) system that runs cognitive remediation exercises for people with mental health disorders. As such, the prediction of emotional states is essential to customize treatments for those individuals. We exploit the Remote Collaborative and Affective Interactions (RECOLA) database to predict arousal and valence values using machine learning techniques. RECOLA includes audio, video, and physiological recordings of interactions between human participants. To allow learners to focus on the most relevant data, features are extracted from raw data. Such features can be predesigned, learned, or extracted implicitly using deep learners. Our previous work on video recordings focused on predesigned and learned visual features. In this paper, we extend our work onto deep visual features. Our deep visual features are extracted using the MobileNet-v2 convolutional neural network (CNN) that we previously trained on RECOLA's video frames of full/half faces. As the final purpose of our work is to integrate our solution into a practical VR application using head-mounted displays, we experimented with half faces as a proof of concept. The extracted deep features were then used to predict arousal and valence values via optimizable ensemble regression. We also fused the extracted visual features with the predesigned visual features and predicted arousal and valence values using the combined feature set. In an attempt to enhance our prediction performance, we further fused the predictions of the optimizable ensemble model with the predictions of the MobileNet-v2 model. After decision fusion, we achieved a root mean squared error (RMSE) of 0.1140, a Pearson's correlation coefficient (PCC) of 0.8000, and a concordance correlation coefficient (CCC) of 0.7868 on arousal predictions. We achieved an RMSE of 0.0790, a PCC of 0.7904, and a CCC of 0.7645 on valence predictions. © 2024 by the authors.},
  note      = {Publisher: Multidisciplinary Digital Publishing Institute (MDPI)},
  keywords  = {adult, Affective interaction, Arousal, artificial neural network, Cognitive state, Cognitive/emotional state, Collaborative interaction, computer, Convolutional neural networks, correlation coefficient, Deep learning, emotion, Emotional state, Emotions, female, Forecasting, Helmet mounted displays, human, Humans, Learning algorithms, Learning systems, Long short-term memory, Machine learning, Machine-learning, male, Mean square error, Neural networks, physiology, Regression, Root mean squared errors, Video recording, virtual reality, Visual feature, visual features},
  pubstate  = {published},
  tppubtype = {article}
}
Joudeh, I. O.; Cretu, A. -M.; Bouchard, S.; Guimond, S.
Prediction of Continuous Emotional Measures through Physiological and Visual Data † Article de journal
Dans: Sensors, vol. 23, no 12, 2023, ISSN: 14248220, (Publisher: MDPI).
Résumé | Liens | BibTeX | Étiquettes: Affect recognition, Affective state, Arousal, Data-source, Deep learning, Electrocardiography, emotion, Emotion Recognition, Emotions, face recognition, Faces detection, Forecasting, human, Humans, Images processing, Learning systems, Machine learning, Machine-learning, mental disease, Mental Disorders, Physiological data, physiology, Signal-processing, Statistical tests, Video recording, Virtual-reality environment
@article{joudeh_prediction_2023,
  title     = {Prediction of Continuous Emotional Measures through Physiological and Visual Data},
  author    = {Joudeh, I. O. and Cretu, A.-M. and Bouchard, S. and Guimond, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85163943735&doi=10.3390%2fs23125613&partnerID=40&md5=5e970f0d8c5790b85d8d77a9f3f52a2d},
  doi       = {10.3390/s23125613},
  issn      = {1424-8220},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Sensors},
  volume    = {23},
  number    = {12},
  abstract  = {The affective state of a person can be measured using arousal and valence values. In this article, we contribute to the prediction of arousal and valence values from various data sources. Our goal is to later use such predictive models to adaptively adjust virtual reality (VR) environments and help facilitate cognitive remediation exercises for users with mental health disorders, such as schizophrenia, while avoiding discouragement. Building on our previous work on physiological, electrodermal activity (EDA) and electrocardiogram (ECG) recordings, we propose improving preprocessing and adding novel feature selection and decision fusion processes. We use video recordings as an additional data source for predicting affective states. We implement an innovative solution based on a combination of machine learning models alongside a series of preprocessing steps. We test our approach on RECOLA, a publicly available dataset. The best results are obtained with a concordance correlation coefficient (CCC) of 0.996 for arousal and 0.998 for valence using physiological data. Related work in the literature reported lower CCCs on the same data modality; thus, our approach outperforms the state-of-the-art approaches for RECOLA. Our study underscores the potential of using advanced machine learning techniques with diverse data sources to enhance the personalization of VR environments. © 2023 by the authors.},
  note      = {Publisher: MDPI},
  keywords  = {Affect recognition, Affective state, Arousal, Data-source, Deep learning, Electrocardiography, emotion, Emotion Recognition, Emotions, face recognition, Faces detection, Forecasting, human, Humans, Images processing, Learning systems, Machine learning, Machine-learning, mental disease, Mental Disorders, Physiological data, physiology, Signal-processing, Statistical tests, Video recording, Virtual-reality environment},
  pubstate  = {published},
  tppubtype = {article}
}
Stetz, M. C.; Kaloi-Chen, J. Y.; Turner, D. D.; Bouchard, S.; Riva, G.; Wiederhold, B. K.
The effectiveness of Technology-Enhanced relaxation techniques for military medical warriors Article de journal
Dans: Military Medicine, vol. 176, no 9, p. 1065–1070, 2011, ISSN: 00264075, (Publisher: Association of Military Surgeons of the US).
Résumé | Liens | BibTeX | Étiquettes: adult, Anxiety, article, clinical trial, computer interface, controlled clinical trial, controlled study, female, human, Humans, male, mental stress, methodology, Military Personnel, Psychological, psychological aspect, questionnaire, Questionnaires, randomized controlled trial, Relaxation Therapy, relaxation training, soldier, Stress, User-Computer Interface, Video recording, videorecording
@article{stetz_effectiveness_2011,
  title     = {The Effectiveness of Technology-Enhanced Relaxation Techniques for Military Medical Warriors},
  author    = {Stetz, M. C. and Kaloi-Chen, J. Y. and Turner, D. D. and Bouchard, S. and Riva, G. and Wiederhold, B. K.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80052455147&doi=10.7205%2fMILMED-D-10-00393&partnerID=40&md5=dce993c0b65bb351edd74816a0d65450},
  doi       = {10.7205/MILMED-D-10-00393},
  issn      = {0026-4075},
  year      = {2011},
  date      = {2011-01-01},
  journal   = {Military Medicine},
  volume    = {176},
  number    = {9},
  pages     = {1065--1070},
  abstract  = {Combat zones can be very stressful for those in the area. Even in the battlefield, military medical personnel are expected to save others, while also staying alive. In this study, half of a sample of deployed military medical warriors (total n = 60) participated in technology-assisted relaxation training. Learning relaxation skills with a video clip of virtual reality relaxing scenes showed a statistically significant impact on the anxiety levels of the Experimental Group. © Association of Military Surgeons of the U.S. All rights reserved.},
  note      = {Publisher: Association of Military Surgeons of the US},
  keywords  = {adult, Anxiety, article, clinical trial, computer interface, controlled clinical trial, controlled study, female, human, Humans, male, mental stress, methodology, Military Personnel, Psychological, psychological aspect, questionnaire, Questionnaires, randomized controlled trial, Relaxation Therapy, relaxation training, soldier, Stress, User-Computer Interface, Video recording, videorecording},
  pubstate  = {published},
  tppubtype = {article}
}
Allili, M. S.; Ziou, D.
Object contour tracking in videos by using adaptive mixture models and shape priors Article d'actes
Dans: Proceedings of the International Symposium CompIMAGE 2006 - Computational Modelling of Objects Represented in Images: Fundamentals, Methods and Applications, p. 47–52, Coimbra, 2007, ISBN: 978-0-415-43349-5.
Résumé | Liens | BibTeX | Étiquettes: Active contours, Best fits, Current frames, Image matching, Maximum likelihood, Mixture models, Mixtures, Multi class, Non-static backgrounds, Object contours, Object tracking algorithms, Real video sequences, Robust tracking, Shape informations, Shape priors, Video recording, Video sequences
@inproceedings{allili_object_2007-1,
  title     = {Object Contour Tracking in Videos by Using Adaptive Mixture Models and Shape Priors},
  author    = {Allili, M. S. and Ziou, D.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-60949085472&partnerID=40&md5=ba63e1abbabcfdd48583b41f700508ef},
  isbn      = {978-0-415-43349-5},
  year      = {2007},
  date      = {2007-01-01},
  booktitle = {Proceedings of the International Symposium CompIMAGE 2006 - Computational Modelling of Objects Represented in Images: Fundamentals, Methods and Applications},
  pages     = {47--52},
  address   = {Coimbra},
  abstract  = {In this paper, we propose a novel object tracking algorithm in video sequences. The method is based on object mixture matching between successive frames of the sequence by using active contours. Only the segmentation of the objects in the first frame is required for initialization. The evolution of the object contour on a current frame aims to find the maximum fidelity of the mixture likelihood for the same object between successive frames while having the best fit of the mixture parameters to the homogenous parts of the objects. To permit for a precise and robust tracking, region, boundary and shape information are coupled in the model. The method permits for tracking multi-class objects on cluttered and non-static backgrounds. We validate our approach on examples of tracking performed on real video sequences. © 2007 Taylor & Francis Group.},
  keywords  = {Active contours, Best fits, Current frames, Image matching, Maximum likelihood, Mixture models, Mixtures, Multi class, Non-static backgrounds, Object contours, Object tracking algorithms, Real video sequences, Robust tracking, Shape informations, Shape priors, Video recording, Video sequences},
  pubstate  = {published},
  tppubtype = {inproceedings}
}