

de Recherche et d’Innovation
en Cybersécurité et Société
Allaoui, M. L.; Allili, M. S.; Belaid, A.
HA-U3Net: A modality-agnostic framework for 3D medical image segmentation using nested V-Net structure and hybrid attention Article de journal
Dans: Knowledge-Based Systems, vol. 327, 2025, ISSN: 09507051 (ISSN).
Résumé | Liens | BibTeX | Étiquettes: 3D medical image, 3D medical image segmentation, Diagnosis, Diagnosis planning, Disease diagnosis, Disease treatment, Generalization capability, Image segmentation, Magnetic resonance imaging, Medical image processing, Medical image segmentation, Nested volume-structure, Net structures, Self hybrid attention, Structures (built objects)
@article{allaoui_ha-u3net_2025,
  title     = {{HA-U3Net}: A modality-agnostic framework for {3D} medical image segmentation using nested {V-Net} structure and hybrid attention},
  author    = {Allaoui, M. L. and Allili, M. S. and Belaid, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105011370963&doi=10.1016%2Fj.knosys.2025.114127&partnerID=40&md5=d98a109f015445adb3001bb4017bf953},
  doi       = {10.1016/j.knosys.2025.114127},
  issn      = {0950-7051},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Knowledge-Based Systems},
  volume    = {327},
  abstract  = {3D medical image segmentation is essential for disease diagnosis and treatment planning across a wide range of imaging modalities (e.g., MRI, CT, ultrasound, and PET). However, modality-specific challenges, such as noise, artifacts, low contrast, and anatomical variability, along with the presence of small lesions and fuzzy boundaries, hinder the generalization capability of existing segmentation models. In this work, we present HA-U3Net, a novel 3D U-Net-based model designed to address these limitations through a stepwise approach. First, we introduce a deeply nested U3-shaped structure built upon 3D V-Net modules, enabling multi-scale hierarchical representation learning. Second, we integrate a hybrid attention mechanism combining spatial and channel-wise attention to enhance salient features extraction and the delineation of small or poorly defined structures. Third, we demonstrate the cross-modality generalization capabilities of HA-U3Net through extensive evaluations on several datasets, where our model consistently outperforms baseline methods. Finally, we propose a lightweight variant, U3Mamba, reducing computational complexity while maintaining high performance. © 2025 Elsevier B.V.},
  keywords  = {3D medical image, 3D medical image segmentation, Diagnosis, Diagnosis planning, Disease diagnosis, Disease treatment, Generalization capability, Image segmentation, Magnetic resonance imaging, Medical image processing, Medical image segmentation, Nested volume-structure, Net structures, Self hybrid attention, Structures (built objects)},
  pubstate  = {published},
  tppubtype = {article}
}
Bérubé, A.; Pétrin, R.; Boudreault, M.; Marcotte-Beaumier, G.; Blais, C.
Childhood maltreatment influences parental mimicry of children's emotional facial expressions Article de journal
Dans: Child Abuse and Neglect, vol. 170, 2025, ISSN: 01452134 (ISSN).
Résumé | Liens | BibTeX | Étiquettes: Adolescent, adult, Adult Survivors of Child Abuse, anger, article, Child, Child Abuse, child abuse survivor, child parent relation, childhood maltreatment, Childhood Trauma Questionnaire, controlled study, Diagnosis, disgust, emotion, Emotional facial expressions, emotional neglect, Emotions, Facial Expression, female, human, Humans, major clinical study, male, Mimicry, neglect, parent, Parent-Child Relations, Parenting, Parents, path analysis, physical abuse, psychological functioning, psychology, questionnaire, sadness, sexual abuse, social bonding, Surveys and Questionnaires, Young Adult
@article{berube_childhood_2025,
  title     = {Childhood maltreatment influences parental mimicry of children's emotional facial expressions},
  author    = {Bérubé, A. and Pétrin, R. and Boudreault, M. and Marcotte-Beaumier, G. and Blais, C.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105020986193&doi=10.1016%2Fj.chiabu.2025.107787&partnerID=40&md5=18593b82f701fc76ad054419d48dfc69},
  doi       = {10.1016/j.chiabu.2025.107787},
  issn      = {0145-2134},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {Child Abuse and Neglect},
  volume    = {170},
  abstract  = {Background: Childhood maltreatment can disrupt socio-emotional functioning, potentially influencing how parents respond to children's emotional facial expressions. Mimicry, an automatic reaction to others' facial expressions, is a critical mechanism for social bonding and affiliation in parent-child relationships. However, the effects of childhood maltreatment on parental mimicry remain underexplored. Objective: This study examined the relationship between different forms of childhood maltreatment and parents' mimicry of children's emotional facial expressions. Participants and setting: Fifty-seven parents participated in an emotion recognition task conducted either at a local community organization or at the university laboratory. Methods: Parents' facial reactions were recorded and analyzed using FaceReader software to detect mimicry. The Childhood Trauma Questionnaire (CTQ) assessed parental history of maltreatment. A path analysis model was conducted to evaluate the associations between forms of childhood maltreatment and parental mimicry. Results: A history of physical abuse predicted increased expressions of anger, while physical neglect was linked to reduced anger but heightened disgust. Emotional and sexual abuse were associated with diminished mimicry of sadness, whereas emotional neglect predicted enhanced sadness mimicry. Conclusions: Findings suggest that childhood maltreatment alters parents' facial reactions to children's emotional facial expressions, potentially impacting parental sensitivity. © 2025},
  keywords  = {Adolescent, adult, Adult Survivors of Child Abuse, anger, article, Child, Child Abuse, child abuse survivor, child parent relation, childhood maltreatment, Childhood Trauma Questionnaire, controlled study, Diagnosis, disgust, emotion, Emotional facial expressions, emotional neglect, Emotions, Facial Expression, female, human, Humans, major clinical study, male, Mimicry, neglect, parent, Parent-Child Relations, Parenting, Parents, path analysis, physical abuse, psychological functioning, psychology, questionnaire, sadness, sexual abuse, social bonding, Surveys and Questionnaires, Young Adult},
  pubstate  = {published},
  tppubtype = {article}
}
Allaoui, M. L.; Allili, M. S.
MixLVMM: A Mixture of Lightweight Vision Mamba Model for Enhancing Skin Lesion Segmentation Across High Tone Variability Article de journal
Dans: IEEE Access, vol. 13, p. 121234–121249, 2025, ISSN: 21693536 (ISSN).
Résumé | Liens | BibTeX | Étiquettes: Attention mechanism, Attention mechanisms, Computational efficiency, Critical challenges, Dermatology, Diagnosis, Image segmentation, Lesion segmentations, Lung cancer, Mixture of experts model, Mixture-of-experts model, Segmentation performance, Skin lesion, Skin lesion segmentation, Skin/lesion tone variability, Vision mamba
@article{allaoui_mixlvmm_2025,
  title     = {{MixLVMM}: A Mixture of Lightweight {Vision Mamba} Model for Enhancing Skin Lesion Segmentation Across High Tone Variability},
  author    = {Allaoui, M. L. and Allili, M. S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105012036322&doi=10.1109%2FACCESS.2025.3588476&partnerID=40&md5=1cf51dcf43653e1677ad36a1360392ac},
  doi       = {10.1109/ACCESS.2025.3588476},
  issn      = {2169-3536},
  year      = {2025},
  date      = {2025-01-01},
  journal   = {IEEE Access},
  volume    = {13},
  pages     = {121234--121249},
  abstract  = {Accurate skin lesion segmentation remains a critical challenge in automated dermatological diagnosis due to heterogeneous lesion presentations, ambiguous boundaries, imaging artifacts, and significant variability in skin and lesion tones across diverse populations. Current segmentation methods inadequately address these multifaceted complexities, particularly failing to handle extreme tone variations that can lead to diagnostic bias. To address these limitations, we present the Mixture of Lightweight Vision Mamba Model (MixLVMM), a novel expert-based framework that enhances segmentation performance across high tone variability through specialized processing. Our approach employs a Siamese network with triplet loss as a gate mechanism to categorize lesions based on tonal characteristics, routing each image to specialized Vision Mamba Model (VMM) experts optimized for specific lesion categories. Each expert utilizes a U-shaped architecture incorporating Focused Vision Mamba blocks and Adaptive Salient Region Attention modules to capture lesion-specific features while maintaining computational efficiency. Comprehensive evaluation on ISIC and PH2 datasets demonstrates that MixLVMM achieves superior segmentation performance with an average Dice coefficient of 93%, surpassing state-of-the-art methods while maintaining efficiency with only 2.5M parameters. These results establish MixLVMM as a robust solution for addressing tone-related segmentation challenges in clinical dermatology, offering both high accuracy and practical deployment feasibility for real-world applications. © 2013 IEEE.},
  keywords  = {Attention mechanism, Attention mechanisms, Computational efficiency, Critical challenges, Dermatology, Diagnosis, Image segmentation, Lesion segmentations, Lung cancer, Mixture of experts model, Mixture-of-experts model, Segmentation performance, Skin lesion, Skin lesion segmentation, Skin/lesion tone variability, Vision mamba},
  pubstate  = {published},
  tppubtype = {article}
}
Gingras, F.; Estéphan, A.; Fiset, D.; Lingnan, H.; Caldara, R.; Blais, C.
Differences in eye movements for face recognition between Canadian and Chinese participants are not modulated by social orientation Article de journal
Dans: PLoS ONE, vol. 18, no 12 December, 2023, ISSN: 19326203 (ISSN), (Publisher: Public Library of Science).
Résumé | Liens | BibTeX | Étiquettes: adult, article, Asian, Asian People, Canada, Canadian, China, Chinese, clinical article, Diagnosis, East Asian, eye movement, Eye movements, Facial Recognition, female, human, human experiment, Humans, male, North American, Orientation, questionnaire, social value, vision
@article{gingras_differences_2023,
  title     = {Differences in eye movements for face recognition between {Canadian} and {Chinese} participants are not modulated by social orientation},
  author    = {Gingras, F. and Estéphan, A. and Fiset, D. and Lingnan, H. and Caldara, R. and Blais, C.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85179766751&doi=10.1371%2fjournal.pone.0295256&partnerID=40&md5=34499ca3a094ccf3937f07a1fb177c82},
  doi       = {10.1371/journal.pone.0295256},
  issn      = {1932-6203},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {PLoS ONE},
  volume    = {18},
  number    = {12 December},
  publisher = {Public Library of Science},
  abstract  = {Face recognition strategies do not generalize across individuals. Many studies have reported robust cultural differences between West Europeans/North Americans and East Asians in eye movement strategies during face recognition. The social orientation hypothesis posits that individualistic vs. collectivistic (IND/COL) value systems, respectively defining West European/North American and East Asian societies, would be at the root of many cultural differences in visual perception. Whether social orientation is also responsible for such cultural contrast in face recognition remains to be clarified. To this aim, we conducted two experiments with West European/North American and Chinese observers. In Experiment 1, we probed the existence of a link between IND/COL social values and eye movements during face recognition, by using an IND/COL priming paradigm. In Experiment 2, we dissected the latter relationship in greater depth, by using two IND/COL questionnaires, including subdimensions to those concepts. In both studies, cultural differences in fixation patterns were revealed between West European/North American and East Asian observers. Priming IND/ COL values did not modulate eye movement visual sampling strategies, and only specific subdimensions of the IND/COL questionnaires were associated with distinct eye-movement patterns. Altogether, we show that the typical contrast between IND/COL cannot fully account for cultural differences in eye movement strategies for face recognition. Cultural differences in eye movements for faces might originate from mechanisms distinct from social orientation. © 2023 Gingras et al. This is an open access article distributed under the terms of the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited.},
  note      = {Publisher: Public Library of Science},
  keywords  = {adult, article, Asian, Asian People, Canada, Canadian, China, Chinese, clinical article, Diagnosis, East Asian, eye movement, Eye movements, Facial Recognition, female, human, human experiment, Humans, male, North American, Orientation, questionnaire, social value, vision},
  pubstate  = {published},
  tppubtype = {article}
}
Canale, N.; Cornil, A.; Giroux, I.; Bouchard, S.; Billieux, J.
Probing gambling urge as a state construct: Evidence from a sample of community gamblers Article de journal
Dans: Psychology of Addictive Behaviors, vol. 33, no 2, p. 154–161, 2019, ISSN: 0893164X, (Publisher: Educational Publishing Foundation).
Résumé | Liens | BibTeX | Étiquettes: Adolescent, adult, aged, anticipation, article, case report, clinical article, clinical practice, confirmatory factor analysis, craving, Cross-Sectional Studies, cross-sectional study, devices, Diagnosis, exploratory factor analysis, Factor Analysis, female, Gambling, guided imagery, human, Humans, impulsiveness, male, Middle Aged, procedures, Psychiatric Status Rating Scales, psychological rating scale, Psychometrics, psychometry, reliability, reproducibility, Reproducibility of Results, Statistical, Young Adult
@article{canale_probing_2019,
  title     = {Probing gambling urge as a state construct: Evidence from a sample of community gamblers},
  author    = {Canale, N. and Cornil, A. and Giroux, I. and Bouchard, S. and Billieux, J.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85059626617&doi=10.1037%2fadb0000438&partnerID=40&md5=7d3a08b0815d09db467360f3a385f57d},
  doi       = {10.1037/adb0000438},
  issn      = {0893-164X},
  year      = {2019},
  date      = {2019-01-01},
  journal   = {Psychology of Addictive Behaviors},
  volume    = {33},
  number    = {2},
  pages     = {154--161},
  publisher = {Educational Publishing Foundation},
  abstract  = {Little effort has been made to systematically test the psychometric properties of the Gambling Craving Scale (GACS; Young & Wohl, 2009). The GACS is adapted from the Questionnaire on Smoking Urges (Tiffany & Drobes, 1991) and thus measures gambling-related urge. Crucially, the validation of scales assessing gambling urge is complex because this construct is better conceptualized as a state (a transient and contextdetermined phenomenon). In the present study, we tested the psychometric properties of the French version of the GACS with 2 independent samples of community gamblers following an induction procedure delivered through an audio-guided imagery sequence aimed at promoting gambling urge. This procedure was specifically used to ensure the assessment of gambling urge as a state variable. Participants also completed measures of gambling severity, gambling cognitions and motives, impulsivity, and affect. Confirmatory factor analysis showed that the original 3-factor solution (anticipation, desire, relief) did not fit the data well. Additional exploratory factor analysis suggested instead a 2-factor solution: An intention and desire to gamble dimension and a relief dimension. The factorial structure resulting from the exploratory factor analysis was tested with confirmatory factor analysis in a second independent sample, resulting in an acceptable fit. The 2 dimensions presented good internal reliability and correlated differentially with the other study's variables. The current study showed that, similar to what has been reported for substance-related urges, gambling urges are adequately probed with a bidimensional model. The findings suggest that the French GACS has good psychometric properties, legitimizing its use in research and clinical practice. © 2019 American Psychological Association.},
  note      = {Publisher: Educational Publishing Foundation},
  keywords  = {Adolescent, adult, aged, anticipation, article, case report, clinical article, clinical practice, confirmatory factor analysis, craving, Cross-Sectional Studies, cross-sectional study, devices, Diagnosis, exploratory factor analysis, Factor Analysis, female, Gambling, guided imagery, human, Humans, impulsiveness, male, Middle Aged, procedures, Psychiatric Status Rating Scales, psychological rating scale, Psychometrics, psychometry, reliability, reproducibility, Reproducibility of Results, Statistical, Young Adult},
  pubstate  = {published},
  tppubtype = {article}
}
Marschall-Lévesque, S.; Rouleau, J. -L.; Renaud, P.
Increasing Valid Profiles in Phallometric Assessment of Sex Offenders with Child Victims: Combining the Strengths of Audio Stimuli and Synthetic Characters Article de journal
Dans: Archives of Sexual Behavior, vol. 47, no 2, p. 417–428, 2018, ISSN: 00040002, (Publisher: Springer New York LLC).
Résumé | Liens | BibTeX | Étiquettes: Acoustic Stimulation, Adolescent, adult, auditory stimulation, Child, clinical article, controlled study, crime victim, Crime Victims, Diagnosis, DSM-5, Feedback, female, human, Humans, male, Pedophilia, Penis, Photic Stimulation, photostimulation, physiology, Plethysmography, procedures, psychology, receiver operating characteristic, Sensory, sensory feedback, sex determination, Sex Offenses, sexual crime, stimulus, victim, Young Adult
@article{marschall-levesque_increasing_2018,
  title     = {Increasing Valid Profiles in Phallometric Assessment of Sex Offenders with Child Victims: Combining the Strengths of Audio Stimuli and Synthetic Characters},
  author    = {Marschall-Lévesque, S. and Rouleau, J.-L. and Renaud, P.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85033397289&doi=10.1007%2fs10508-017-1053-y&partnerID=40&md5=bbb82341dfebd50938aa29358f8e0e69},
  doi       = {10.1007/s10508-017-1053-y},
  issn      = {0004-0002},
  year      = {2018},
  date      = {2018-01-01},
  journal   = {Archives of Sexual Behavior},
  volume    = {47},
  number    = {2},
  pages     = {417--428},
  publisher = {Springer New York LLC},
  abstract  = {Penile plethysmography (PPG) is a measure of sexual interests that relies heavily on the stimuli it uses to generate valid results. Ethical considerations surrounding the use of real images in PPG have further limited the content admissible for these stimuli. To palliate this limitation, the current study aimed to combine audio and visual stimuli by incorporating computer-generated characters to create new stimuli capable of accurately classifying sex offenders with child victims, while also increasing the number of valid profiles. Three modalities (audio, visual, and audiovisual) were compared using two groups (15 sex offenders with child victims and 15 non-offenders). Both the new visual and audiovisual stimuli resulted in a 13% increase in the number of valid profiles at 2.5 mm, when compared to the standard audio stimuli. Furthermore, the new audiovisual stimuli generated a 34% increase in penile responses. All three modalities were able to discriminate between the two groups by their responses to the adult and child stimuli. Lastly, sexual interest indices for all three modalities could accurately classify participants in their appropriate groups, as demonstrated by ROC curve analysis (i.e., audio AUC = .81, 95% CI [.60, 1.00]; visual AUC = .84, 95% CI [.66, 1.00], and audiovisual AUC = .83, 95% CI [.63, 1.00]). Results suggest that computer-generated characters allow accurate discrimination of sex offenders with child victims and can be added to already validated stimuli to increase the number of valid profiles. The implications of audiovisual stimuli using computer-generated characters and their possible use in PPG evaluations are also discussed. © 2017, Springer Science+Business Media, LLC.},
  note      = {Publisher: Springer New York LLC},
  keywords  = {Acoustic Stimulation, Adolescent, adult, auditory stimulation, Child, clinical article, controlled study, crime victim, Crime Victims, Diagnosis, DSM-5, Feedback, female, human, Humans, male, Pedophilia, Penis, Photic Stimulation, photostimulation, physiology, Plethysmography, procedures, psychology, receiver operating characteristic, Sensory, sensory feedback, sex determination, Sex Offenses, sexual crime, stimulus, victim, Young Adult},
  pubstate  = {published},
  tppubtype = {article}
}
Lord-Gauthier, J.; Montigny, F.; Bouchard, S.
Le stress au travail. Savoir y faire face Article de journal
Dans: Perspective infirmiere : revue officielle de l'Ordre des infirmieres et infirmiers du Quebec, vol. 13, no 5, p. 21–25, 2016, ISSN: 17081890.
Liens | BibTeX | Étiquettes: Adaptation, Burnout, Canada, coping behavior, Diagnosis, epidemiology, etiology, female, human, Humans, job stress, male, mental stress, nurse, Nurses, Nursing, Occupational Stress, physiology, prevention and control, Professional, Psychological, psychology, questionnaire, Retrospective Studies, retrospective study, statistics and numerical data, Stress, Surveys and Questionnaires, Therapy, Workplace
@article{lord-gauthier_stress_2016,
  title     = {Le stress au travail. Savoir y faire face},
  author    = {Lord-Gauthier, J. and Montigny, F. and Bouchard, S.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045181367&partnerID=40&md5=6c31d6b363aecd894805c5c34005f804},
  issn      = {1708-1890},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Perspective infirmière : revue officielle de l'Ordre des infirmières et infirmiers du Québec},
  volume    = {13},
  number    = {5},
  pages     = {21--25},
  keywords  = {Adaptation, Burnout, Canada, coping behavior, Diagnosis, epidemiology, etiology, female, human, Humans, job stress, male, mental stress, nurse, Nurses, Nursing, Occupational Stress, physiology, prevention and control, Professional, Psychological, psychology, questionnaire, Retrospective Studies, retrospective study, statistics and numerical data, Stress, Surveys and Questionnaires, Therapy, Workplace},
  pubstate  = {published},
  tppubtype = {article}
}
Corno, G.; Bouchard, S.; Forget, H.
Usability assessment of the virtual multitasking test (V-MT) for elderly people Article de journal
Dans: Annual Review of CyberTherapy and Telemedicine, vol. 12, p. 168–172, 2014, ISSN: 15548716, (Publisher: Virtual reality med institute).
Résumé | Liens | BibTeX | Étiquettes: 80 and over, aged, behavior, cognition, cognitive defect, Cognitive Dysfunction, Cognitive functions, computer assisted diagnosis, Computer-Assisted, Diagnosis, Elderly, Elderly people, female, Geriatric Assessment, human, human experiment, Humans, male, Middle Aged, Multitasking, Multitasking Behavior, Older users, Presence, procedures, Psychological research, Usability, Usability assessment, very elderly, virtual reality
@article{corno_usability_2014,
  title     = {Usability assessment of the virtual multitasking test ({V-MT}) for elderly people},
  author    = {Corno, G. and Bouchard, S. and Forget, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928056933&partnerID=40&md5=b0e0cc4d1c5203678a24bbdcf36d7686},
  issn      = {1554-8716},
  year      = {2014},
  date      = {2014-01-01},
  journal   = {Annual Review of CyberTherapy and Telemedicine},
  volume    = {12},
  pages     = {168--172},
  publisher = {Virtual reality med institute},
  abstract  = {In the last decades an increasing number of psychological researches have used Virtual Reality (VR) technology in different fields. Nevertheless, few studies used Virtual Environments (VEs) with a sample of older users. The aim of the present study is to assess the usability of the Virtual Multitasking Test (V-MT), which consists in a virtual apartment created to assess cognitive functions in elderly people. This study reports the preliminary results to support the development of a VE in which elderly people feel present and fully immersed. © 2014, Virtual reality med institute. All rights reserved.},
  note      = {Publisher: Virtual reality med institute},
  keywords  = {80 and over, aged, behavior, cognition, cognitive defect, Cognitive Dysfunction, Cognitive functions, computer assisted diagnosis, Computer-Assisted, Diagnosis, Elderly, Elderly people, female, Geriatric Assessment, human, human experiment, Humans, male, Middle Aged, Multitasking, Multitasking Behavior, Older users, Presence, procedures, Psychological research, Usability, Usability assessment, very elderly, virtual reality},
  pubstate  = {published},
  tppubtype = {article}
}



