

de Recherche et d’Innovation
en Cybersécurité et Société
Bogie, B. J. M.; Noël, C.; Gu, F.; Nadeau, S.; Shvetz, C.; Khan, H.; Rivard, M. -C.; Bouchard, S.; Lepage, M.; Guimond, S.
Using virtual reality to improve verbal episodic memory in schizophrenia: A proof-of-concept trial Article de journal
Dans: Schizophrenia Research: Cognition, vol. 36, 2024, ISSN: 2215-0013, (Publisher: Elsevier Inc.).
Résumé | Liens | BibTeX | Étiquettes: adult, article, clinical article, clinical assessment, Cognitive remediation therapy, cybersickness, disease severity, dizziness, Ecological treatment, Episodic memory, exclusion VR criteria questionnaire, feasibility study, female, Hopkins verbal learning test, human, male, mini international neuropsychiatric interview, nausea, outcome assessment, Positive and Negative Syndrome Scale, Proof of concept, questionnaire, randomized controlled trial, schizophrenia, scoring system, Semantic encoding, Semantics, task performance, training, Verbal memory, virtual reality, vr experience questionnaire
@article{bogie_using_2024,
  title = {Using virtual reality to improve verbal episodic memory in schizophrenia: A proof-of-concept trial},
  author = {Bogie, B. J. M. and Noël, C. and Gu, F. and Nadeau, S. and Shvetz, C. and Khan, H. and Rivard, M. -C. and Bouchard, S. and Lepage, M. and Guimond, S.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85186986986&doi=10.1016%2fj.scog.2024.100305&partnerID=40&md5=a15c598b45b8f44a40b25fe5fd078a06},
  doi = {10.1016/j.scog.2024.100305},
  issn = {2215-0013},
  year = {2024},
  date = {2024-01-01},
  journal = {Schizophrenia Research: Cognition},
  volume = {36},
  abstract = {Background: Schizophrenia is associated with impairments in verbal episodic memory. Strategy for Semantic Association Memory (SESAME) training represents a promising cognitive remediation program to improve verbal episodic memory. Virtual reality (VR) may be a novel tool to increase the ecological validity and transfer of learned skills of traditional cognitive remediation programs. The present proof-of-concept study aimed to assess the feasibility, acceptability, and preliminary efficacy of a VR-based cognitive remediation module inspired by SESAME principles to improve the use of verbal episodic memory strategies in schizophrenia. Methods: Thirty individuals with schizophrenia/schizoaffective disorder completed this study. Participants were randomized to either a VR-based verbal episodic memory training condition inspired by SESAME principles (intervention group) or an active control condition (control group). In the training condition, a coach taught semantic encoding strategies (active rehearsal and semantic clustering) to help participants remember restaurant orders in VR. In the active control condition, participants completed visuospatial puzzles in VR. Attrition rate, participant experience ratings, and cybersickness questionnaires were used to assess feasibility and acceptability. Trial 1 of the Hopkins Verbal Learning Test – Revised was administered pre- and post-intervention to assess preliminary efficacy. Results: Feasibility was demonstrated by a low attrition rate (5.88 %), and acceptability was demonstrated by limited cybersickness and high levels of enjoyment. Although the increase in the number of semantic clusters used following the module did not reach conventional levels of statistical significance in the intervention group, it demonstrated a notable trend with a medium effect size (t = 1.48},
  internal-note = {abstract truncated in source export — TODO restore from publisher record},
  note = {Publisher: Elsevier Inc.},
  keywords = {adult, article, clinical article, clinical assessment, Cognitive remediation therapy, cybersickness, disease severity, dizziness, Ecological treatment, Episodic memory, exclusion VR criteria questionnaire, feasibility study, female, Hopkins verbal learning test, human, male, mini international neuropsychiatric interview, nausea, outcome assessment, Positive and Negative Syndrome Scale, Proof of concept, questionnaire, randomized controlled trial, schizophrenia, scoring system, Semantic encoding, Semantics, task performance, training, Verbal memory, virtual reality, vr experience questionnaire},
  pubstate = {published},
  tppubtype = {article}
}
Guitard, T.; Bouchard, S.; Bélanger, C.; Berthiaume, M.
Exposure to a standardized catastrophic scenario in virtual reality or a personalized scenario in imagination for Generalized Anxiety Disorder Article de journal
Dans: Journal of Clinical Medicine, vol. 8, no 3, 2019, ISSN: 2077-0383, (Publisher: MDPI).
Résumé | Liens | BibTeX | Étiquettes: adult, anxiety assessment, article, avoidance behavior, clinical article, cognitive avoidance questionnaire, cognitive behavioral therapy, Cognitive exposure, disease severity, DSM-IV, Exposure in virtual reality, fatigue, female, gatineau presence questionnaire, generalized anxiety disorder, Generalized Anxiety Disorder (GAD), human, human experiment, imagination, immersive tendencies questionnaire, Likert scale, male, Middle Aged, mini international neuropsychiatric interview, penn state worry questionnaire, Personalized scenario, Positive and Negative Affect Schedule, Positive and Negative Syndrome Scale, Presence Questionnaire, psychotherapy, questionnaire, Simulator Sickness Questionnaire, Standardized scenario, task performance, test retest reliability, time series analysis, virtual reality
@article{guitard_exposure_2019,
  title = {Exposure to a standardized catastrophic scenario in virtual reality or a personalized scenario in imagination for Generalized Anxiety Disorder},
  author = {Guitard, T. and Bouchard, S. and Bélanger, C. and Berthiaume, M.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85073896961&doi=10.3390%2fjcm8030309&partnerID=40&md5=b80f2e6602416c35dd8e36fd8b19c803},
  doi = {10.3390/jcm8030309},
  issn = {2077-0383},
  year = {2019},
  date = {2019-01-01},
  journal = {Journal of Clinical Medicine},
  volume = {8},
  number = {3},
  abstract = {The cognitive behavioral treatment of generalized anxiety disorder (GAD) often involves exposing patients to a catastrophic scenario depicting their most feared worry. The aim of this study was to examine whether a standardized scenario recreated in virtual reality (VR) would elicit anxiety and negative affect and how it compared to the traditional method of imagining a personalized catastrophic scenario. A sample of 28 participants were first exposed to a neutral non-catastrophic scenario and then to a personalized scenario in imagination or a standardized virtual scenario presented in a counterbalanced order. The participants completed questionnaires before and after each immersion. The results suggest that the standardized virtual scenario induced significant anxiety. No difference was found when comparing exposure to the standardized scenario in VR and exposure to the personalized scenario in imagination. These findings were specific to anxiety and not to the broader measure of negative affect. Individual differences in susceptibility to feel present in VR was a significant predictor of increase in anxiety and negative affect. Future research could use these scenarios to conduct a randomized control trial to test the efficacy and cost/benefits of using VR in the treatment of GAD. © 2019 by the authors. Licensee MDPI, Basel, Switzerland.},
  note = {Publisher: MDPI},
  keywords = {adult, anxiety assessment, article, avoidance behavior, clinical article, cognitive avoidance questionnaire, cognitive behavioral therapy, Cognitive exposure, disease severity, DSM-IV, Exposure in virtual reality, fatigue, female, gatineau presence questionnaire, generalized anxiety disorder, Generalized Anxiety Disorder (GAD), human, human experiment, imagination, immersive tendencies questionnaire, Likert scale, male, Middle Aged, mini international neuropsychiatric interview, penn state worry questionnaire, Personalized scenario, Positive and Negative Affect Schedule, Positive and Negative Syndrome Scale, Presence Questionnaire, psychotherapy, questionnaire, Simulator Sickness Questionnaire, Standardized scenario, task performance, test retest reliability, time series analysis, virtual reality},
  pubstate = {published},
  tppubtype = {article}
}
Ansado, J.; Brulé, J.; Chasen, C.; Northoff, G.; Bouchard, S.
The virtual reality working-memory-training program (VR WORK M): Description of an individualized, integrated program Article de journal
Dans: Annual Review of CyberTherapy and Telemedicine, vol. 2018, no 16, p. 101–117, 2018, ISSN: 1554-8716, (Publisher: Interactive Media Institute).
Résumé | Liens | BibTeX | Étiquettes: article, cognition, daily life activity, human, rehabilitation, task performance, training, virtual reality, working memory
@article{ansado_virtual_2018,
  title = {The virtual reality working-memory-training program (VR WORK M): Description of an individualized, integrated program},
  author = {Ansado, J. and Brulé, J. and Chasen, C. and Northoff, G. and Bouchard, S.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85067881079&partnerID=40&md5=c964c5af28aa91128bdeaa0b9b89e645},
  issn = {1554-8716},
  year = {2018},
  date = {2018-01-01},
  journal = {Annual Review of CyberTherapy and Telemedicine},
  volume = {2018},
  number = {16},
  pages = {101--117},
  abstract = {Working memory (WM), which allows us to retain information in memory during a complex task, is a cognitive function that is crucial to daily life. It can be affected by several neurological conditions, such as traumatic brain injury or stroke. Numerous studies suggest that it is possible to resolve WM deficits using targeted rehabilitation programs. Virtual reality (VR) is an innovative technology that has proven to be valuable in the evaluation and rehabilitation of cognitive functions. It potentially optimizes cognitive stimulation in a safe environment and can help improve functional activities of daily living by replicating real-life scenarios. With that in mind, this article introduces the first VR-based WM rehabilitation program. The WM training program (Virtual Reality Working-Memory-Training program, VR-WORK-M) recreates a restaurant environment where participants must complete a WM task consisting in repeating a series of items heard via a headset. The goal is to train WM by simulating a business proposal presentation. The program contains several levels of difficulty resulting from the combination of four complexity factors: (1) the type of business concerned by the proposal (e.g., opening a bakery vs. opening a flower shop); (2) the number of items to repeat (4 vs. 5 vs. 6); (3) the number of subtasks to complete before the WM task (e.g., introducing oneself vs order a drink); and (4) the modality of distractors (e.g., an auditory distractor vs. a visual distractor). VR-WORK-M includes 54 levels of difficulty to be administered in a training program over a period of four weeks, with four or five sessions per week. © 2018, Interactive Media Institute. All rights reserved.},
  note = {Publisher: Interactive Media Institute},
  keywords = {article, cognition, daily life activity, human, rehabilitation, task performance, training, virtual reality, working memory},
  pubstate = {published},
  tppubtype = {article}
}
Royer, J.; Blais, C.; Barnabé-Lortie, V.; Carré, M.; Leclerc, J.; Fiset, D.
Efficient visual information for unfamiliar face matching despite viewpoint variations: It's not in the eyes! Article de journal
Dans: Vision Research, vol. 123, p. 33–40, 2016, ISSN: 0042-6989, (Publisher: Elsevier Ltd).
Résumé | Liens | BibTeX | Étiquettes: accuracy, adult, article, association, attention, Bubbles, Evoked Potentials, eye fixation, Face, face profile, face recognition, Facial Recognition, facies, female, Fixation, human, human experiment, Humans, Image analysis, Individual differences, male, Ocular, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, procedures, Psychophysics, recognition, Recognition (Psychology), regression analysis, task performance, unfamiliar face matching, viewpoint variation, Viewpoint variations, Visual, visual discrimination, visual evoked potential, visual information, visual memory, visual stimulation, visual system parameters, Young Adult
@article{royer_efficient_2016,
  title = {Efficient visual information for unfamiliar face matching despite viewpoint variations: It's not in the eyes!},
  author = {Royer, J. and Blais, C. and Barnabé-Lortie, V. and Carré, M. and Leclerc, J. and Fiset, D.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84968779426&doi=10.1016%2fj.visres.2016.04.004&partnerID=40&md5=4c63f6eea279f7322c9af23ae9ed22c1},
  doi = {10.1016/j.visres.2016.04.004},
  issn = {0042-6989},
  year = {2016},
  date = {2016-01-01},
  journal = {Vision Research},
  volume = {123},
  pages = {33--40},
  abstract = {Faces are encountered in highly diverse angles in real-world settings. Despite this considerable diversity, most individuals are able to easily recognize familiar faces. The vast majority of studies in the field of face recognition have nonetheless focused almost exclusively on frontal views of faces. Indeed, a number of authors have investigated the diagnostic facial features for the recognition of frontal views of faces previously encoded in this same view. However, the nature of the information useful for identity matching when the encoded face and test face differ in viewing angle remains mostly unexplored. The present study addresses this issue using individual differences and bubbles, a method that pinpoints the facial features effectively used in a visual categorization task. Our results indicate that the use of features located in the center of the face, the lower left portion of the nose area and the center of the mouth, are significantly associated with individual efficiency to generalize a face's identity across different viewpoints. However, as faces become more familiar, the reliance on this area decreases, while the diagnosticity of the eye region increases. This suggests that a certain distinction can be made between the visual mechanisms subtending viewpoint invariance and face recognition in the case of unfamiliar face identification. Our results further support the idea that the eye area may only come into play when the face stimulus is particularly familiar to the observer. © 2016 Elsevier Ltd.},
  note = {Publisher: Elsevier Ltd},
  keywords = {accuracy, adult, article, association, attention, Bubbles, Evoked Potentials, eye fixation, Face, face profile, face recognition, Facial Recognition, facies, female, Fixation, human, human experiment, Humans, Image analysis, Individual differences, male, Ocular, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, procedures, Psychophysics, recognition, Recognition (Psychology), regression analysis, task performance, unfamiliar face matching, viewpoint variation, Viewpoint variations, Visual, visual discrimination, visual evoked potential, visual information, visual memory, visual stimulation, visual system parameters, Young Adult},
  pubstate = {published},
  tppubtype = {article}
}
Joyal, C. C.; Jacob, L.; Cigna, M. -H.; Guay, J. -P.; Renaud, P.
Virtual faces expressing emotions: An initial concomitant and construct validity study Article de journal
Dans: Frontiers in Human Neuroscience, vol. 8, no SEP, p. 1–6, 2014, ISSN: 1662-5161, (Publisher: Frontiers Media S. A.).
Résumé | Liens | BibTeX | Étiquettes: adult, anger, article, computer program, construct validity, corrugator supercilii muscle, disgust, Electromyography, emotion, emotionality, face muscle, Facial Expression, Fear, female, gaze, happiness, human, human experiment, male, Middle Aged, muscle contraction, normal human, positive feedback, sadness, surprise, task performance, virtual reality, Young Adult, zygomatic major muscle
@article{joyal_virtual_2014,
  title = {Virtual faces expressing emotions: An initial concomitant and construct validity study},
  author = {Joyal, C. C. and Jacob, L. and Cigna, M. -H. and Guay, J. -P. and Renaud, P.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84933679803&doi=10.3389%2ffnhum.2014.00787&partnerID=40&md5=c51b26765fb1e2152cede99adcd519b0},
  doi = {10.3389/fnhum.2014.00787},
  issn = {1662-5161},
  year = {2014},
  date = {2014-01-01},
  journal = {Frontiers in Human Neuroscience},
  volume = {8},
  number = {SEP},
  pages = {1--6},
  abstract = {Objectives: The goal of this study was to initially assess concomitants and construct validity of a newly developed set of virtual faces expressing six fundamental emotions (happiness, surprise, anger, sadness, fear, and disgust). Recognition rates, facial electromyography (zygomatic major and corrugator supercilii muscles), and regional gaze fixation latencies (eyes and mouth regions) were compared in 41 adult volunteers (20 ♂, 21 ♀) during the presentation of video clips depicting real vs. virtual adults expressing emotions. Background: Facial expressions of emotions represent classic stimuli for the studyofsocial cognition. Developing virtual dynamic facial expressions ofemotions, however, would open-up possibilities, both for fundamental and clinical research. For instance, virtual faces allow real-time Human–Computer retroactions between physiological measures and the virtual agent. Results: Emotions expressed by each set of stimuli were similarly recognized, both by men and women. Accordingly, both sets of stimuli elicited similar activation of facial muscles and similar ocular fixation times in eye regions from man and woman participants. Conclusion: Further validation studies can be performed with these virtual faces among clinical populations known to present social cognition difficulties. Brain–Computer Interface studies with feedback–feedforward interactions based on facial emotion expressions can also be conducted with these stimuli. © 2014 Joyal, Jacob, Cigna, Guay and Renaud.},
  note = {Publisher: Frontiers Media S. A.},
  keywords = {adult, anger, article, computer program, construct validity, corrugator supercilii muscle, disgust, Electromyography, emotion, emotionality, face muscle, Facial Expression, Fear, female, gaze, happiness, human, human experiment, male, Middle Aged, muscle contraction, normal human, positive feedback, sadness, surprise, task performance, virtual reality, Young Adult, zygomatic major muscle},
  pubstate = {published},
  tppubtype = {article}
}
Blais, C.; Roy, C.; Fiset, D.; Arguin, M.; Gosselin, F.
The eyes are not the window to basic emotions Article de journal
Dans: Neuropsychologia, vol. 50, no 12, p. 2830–2838, 2012, ISSN: 0028-3932.
Résumé | Liens | BibTeX | Étiquettes: adult, analytic method, article, association, association cortex, cognition, Cues, Discrimination (Psychology), discriminative stimulus, dynamic stimulus, emotion, Emotions, Eye, Facial Expression, female, Fixation, human, human experiment, Humans, male, Mouth, normal human, Ocular, Pattern Recognition, Photic Stimulation, static stimulus, task performance, Visual, visual discrimination, visual information, visual memory, visual system function, Young Adult
@article{blais_eyes_2012,
  title = {The eyes are not the window to basic emotions},
  author = {Blais, C. and Roy, C. and Fiset, D. and Arguin, M. and Gosselin, F.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84865829171&doi=10.1016%2fj.neuropsychologia.2012.08.010&partnerID=40&md5=8a46d347f96ea9bd94bd161b6f1e8b92},
  doi = {10.1016/j.neuropsychologia.2012.08.010},
  issn = {0028-3932},
  year = {2012},
  date = {2012-01-01},
  journal = {Neuropsychologia},
  volume = {50},
  number = {12},
  pages = {2830--2838},
  abstract = {Facial expressions are one of the most important ways to communicate our emotional state. In popular culture and in the scientific literature on face processing, the eye area is often conceived as a very important - if not the most important - cue for the recognition of facial expressions. In support of this, an underutilization of the eye area is often observed in clinical populations with a deficit in the recognition of facial expressions of emotions. Here, we used the Bubbles technique to verify which facial cue is the most important when it comes to discriminating between eight static and dynamic facial expressions (i.e., six basic emotions, pain and a neutral expression). We found that the mouth area is the most important cue for both static and dynamic facial expressions. We conducted an ideal observer analysis on the static expressions and determined that the mouth area is the most informative. However, we found an underutilization of the eye area by human participants in comparison to the ideal observer. We then demonstrated that the mouth area contains the most discriminative motions across expressions. We propose that the greater utilization of the mouth area by the human participants might come from remnants of the strategy the brain has developed with dynamic stimuli, and/or from a strategy whereby the most informative area is prioritized due to the limited capacity of the visuo-cognitive system. © 2012 Elsevier Ltd.},
  keywords = {adult, analytic method, article, association, association cortex, cognition, Cues, Discrimination (Psychology), discriminative stimulus, dynamic stimulus, emotion, Emotions, Eye, Facial Expression, female, Fixation, human, human experiment, Humans, male, Mouth, normal human, Ocular, Pattern Recognition, Photic Stimulation, static stimulus, task performance, Visual, visual discrimination, visual information, visual memory, visual system function, Young Adult},
  pubstate = {published},
  tppubtype = {article}
}
Bouchard, S.; Dumoulin, S.; Michaud, M.; Gougeon, V.
Telepresence experienced in videoconference varies according to emotions involved in videoconference sessions Article de journal
Dans: Annual Review of CyberTherapy and Telemedicine, vol. 9, no 1, p. 104–107, 2011, ISSN: 1554-8716.
Résumé | Liens | BibTeX | Étiquettes: adult, article, controlled study, emotion, female, human, human experiment, male, mental task, task performance, Telemedicine, telepresence, Verbal Behavior, videoconferencing
@article{bouchard_telepresence_2011-1,
  title = {Telepresence experienced in videoconference varies according to emotions involved in videoconference sessions},
  author = {Bouchard, S. and Dumoulin, S. and Michaud, M. and Gougeon, V.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-83455200034&partnerID=40&md5=46d035fda13fc7d2b32c6b08341505a6},
  issn = {1554-8716},
  year = {2011},
  date = {2011-01-01},
  journal = {Annual Review of CyberTherapy and Telemedicine},
  volume = {9},
  number = {1},
  pages = {104--107},
  abstract = {Previous studies have linked telepresence to the strength of the therapeutic relationship experienced during telepsychotherapy. This finding comes as a surprise for many people who have been involved in a teleconference meeting, where telepresence is often considered weak. The aim of this study is to (re)evaluate the impact of emotional engagement on telepresence. Participants were randomly assigned to one of the two conditions: (a) emotionally charged verbal exchange first (followed by a more neutral verbal exchange), or (b) emotionally neutral verbal exchange first (followed by an emotionally charged verbal exchange). A distraction task was performed between the two verbal exchanges in videofoncerence. Results showed that verbal exchanges involving stronger emotions increase telepresence. These results may explain why telepresence is so high in telepsychotherapy.},
  internal-note = {apparent duplicate of entry bouchard_telepresence_2011 (same work, same URL; keyword lists differ) — TODO deduplicate},
  keywords = {adult, article, controlled study, emotion, female, human, human experiment, male, mental task, task performance, Telemedicine, telepresence, Verbal Behavior, videoconferencing},
  pubstate = {published},
  tppubtype = {article}
}
Bouchard, S.; Dumoulin, S.; Michaud, M.; Gougeon, V.
Telepresence experienced in videoconference varies according to emotions involved in videoconference sessions Article de journal
Dans: Annual Review of CyberTherapy and Telemedicine, vol. 9, no 1, p. 104–107, 2011, ISSN: 1554-8716.
Résumé | Liens | BibTeX | Étiquettes: adult, age, Age Factors, article, clinical trial, controlled clinical trial, controlled study, emotion, Emotions, female, human, human experiment, Humans, male, mental task, Middle Aged, randomized controlled trial, sex difference, Sex Factors, task performance, Telemedicine, telepresence, Verbal Behavior, videoconferencing
@article{bouchard_telepresence_2011,
  title = {Telepresence experienced in videoconference varies according to emotions involved in videoconference sessions},
  author = {Bouchard, S. and Dumoulin, S. and Michaud, M. and Gougeon, V.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-83455200034&partnerID=40&md5=46d035fda13fc7d2b32c6b08341505a6},
  issn = {1554-8716},
  year = {2011},
  date = {2011-01-01},
  journal = {Annual Review of CyberTherapy and Telemedicine},
  volume = {9},
  number = {1},
  pages = {104--107},
  abstract = {Previous studies have linked telepresence to the strength of the therapeutic relationship experienced during telepsychotherapy. This finding comes as a surprise for many people who have been involved in a teleconference meeting, where telepresence is often considered weak. The aim of this study is to (re)evaluate the impact of emotional engagement on telepresence. Participants were randomly assigned to one of the two conditions: (a) emotionally charged verbal exchange first (followed by a more neutral verbal exchange), or (b) emotionally neutral verbal exchange first (followed by an emotionally charged verbal exchange). A distraction task was performed between the two verbal exchanges in videofoncerence. Results showed that verbal exchanges involving stronger emotions increase telepresence. These results may explain why telepresence is so high in telepsychotherapy.},
  keywords = {adult, age, Age Factors, article, clinical trial, controlled clinical trial, controlled study, emotion, Emotions, female, human, human experiment, Humans, male, mental task, Middle Aged, randomized controlled trial, sex difference, Sex Factors, task performance, Telemedicine, telepresence, Verbal Behavior, videoconferencing},
  pubstate = {published},
  tppubtype = {article}
}
Blais, C.; Fiset, D.; Arguin, M.; Jolicoeur, P.; Bub, D.; Gosselin, F.
Reading between eye saccades Article de journal
Dans: PLoS ONE, vol. 4, no 7, 2009, ISSN: 1932-6203.
Résumé | Liens | BibTeX | Étiquettes: adult, article, Computer Simulation, eye tracking, human, human experiment, Humans, letter, normal human, Reading, Saccades, saccadic eye movement, skill, spatial discrimination, task performance, visual stimulation, word recognition
@article{blais_reading_2009,
  title = {Reading between eye saccades},
  author = {Blais, C. and Fiset, D. and Arguin, M. and Jolicoeur, P. and Bub, D. and Gosselin, F.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-68149091880&doi=10.1371%2fjournal.pone.0006448&partnerID=40&md5=661dc6218ea707a1934bf90a66d57051},
  doi = {10.1371/journal.pone.0006448},
  issn = {1932-6203},
  year = {2009},
  date = {2009-01-01},
  journal = {PLoS ONE},
  volume = {4},
  number = {7},
  abstract = {Background: Skilled adult readers, in contrast to beginners, show no or little increase in reading latencies as a function of the number of letters in words up to seven letters. The information extraction strategy underlying such efficiency in word identification is still largely unknown, and methods that allow tracking of the letter information extraction through time between eye saccades are needed to fully address this question. Methodology/Principal Findings: The present study examined the use of letter information during reading, by means of the Bubbles technique. Ten participants each read 5,000 five-letter French words sampled in space-time within a 200 ms window. On the temporal dimension, our results show that two moments are especially important during the information extraction process. On the spatial dimension, we found a bias for the upper half of words. We also show for the first time that letter positions four, one, and three are particularly important for the identification of five-letter words. Conclusions/Significance: Our findings are consistent with either a partially parallel reading strategy or an optimal serial reading strategy. We show using computer simulations that this serial reading strategy predicts an absence of a word-length effect for words from four- to seven letters in length. We believe that the Bubbles technique will play an important role in further examining the nature of reading between eye saccades. © 2009 Blais et al.},
  keywords = {adult, article, Computer Simulation, eye tracking, human, human experiment, Humans, letter, normal human, Reading, Saccades, saccadic eye movement, skill, spatial discrimination, task performance, visual stimulation, word recognition},
  pubstate = {published},
  tppubtype = {article}
}
Fiset, D.; Blais, C.; Arguin, M.; Tadros, K.; Éthier-Majcher, C.; Bub, D.; Gosselin, F.
The spatio-temporal dynamics of visual letter recognition Article de journal
Dans: Cognitive Neuropsychology, vol. 26, no 1, p. 23–35, 2009, ISSN: 0264-3294.
Résumé | Liens | BibTeX | Étiquettes: article, bootstrapping, Bubbles technique, Discrimination Learning, human, Humans, Image analysis, linear regression analysis, methodology, Models, Nonlinear Dynamics, nonlinear system, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, Psychological, psychological model, reaction time, recognition, Recognition (Psychology), task performance, temporal summation, time, Time Factors, Visual, word recognition
@article{fiset_spatio-temporal_2009,
  title = {The spatio-temporal dynamics of visual letter recognition},
  author = {Fiset, D. and Blais, C. and Arguin, M. and Tadros, K. and Éthier-Majcher, C. and Bub, D. and Gosselin, F.},
  url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-67649525418&doi=10.1080%2f02643290802421160&partnerID=40&md5=bca7bda93d59994f2679faff9d93f46a},
  doi = {10.1080/02643290802421160},
  issn = {0264-3294},
  year = {2009},
  date = {2009-01-01},
  journal = {Cognitive Neuropsychology},
  volume = {26},
  number = {1},
  pages = {23--35},
  abstract = {We applied the Bubbles technique to reveal directly the spatio-temporal features of uppercase Arial letter identification. We asked four normal readers to each identify 26,000 letters that were randomly sampled in space and time; afterwards, we performed multiple linear regressions on the participant's response accuracy and the space-time samples. We contend that each cluster of connected significant regression coefficients is a letter feature. To bridge the gap between the letter identification literature and this experiment, we also determined the relative importance of the features proposed in the letter identification literature. Results show clear modulations of the relative importance of the letter features of some letters across time, demonstrating that letter features are not always extracted simultaneously at constant speeds. Furthermore, of all the feature classes proposed in the literature, line terminations and horizontals appear to be the two most important for letter identification. © 2008 Psychology Press, an imprint of the Taylor & Francis Group.},
  keywords = {article, bootstrapping, Bubbles technique, Discrimination Learning, human, Humans, Image analysis, linear regression analysis, methodology, Models, Nonlinear Dynamics, nonlinear system, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, Psychological, psychological model, reaction time, recognition, Recognition (Psychology), task performance, temporal summation, time, Time Factors, Visual, word recognition},
  pubstate = {published},
  tppubtype = {article}
}