

Nouboukpo, A.; Allili, M. S.
Spatially-coherent segmentation using hierarchical Gaussian mixture reduction based on Cauchy-Schwarz divergence Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 11662 LNCS, pp. 388–396, 2019, ISSN: 03029743, (ISBN: 9783030272012 Publisher: Springer Verlag).
@article{nouboukpo_spatially-coherent_2019,
title = {Spatially-coherent segmentation using hierarchical Gaussian mixture reduction based on Cauchy-Schwarz divergence},
author = {A. Nouboukpo and M. S. Allili},
editor = {A. Campilho and A. Yu and F. Karray},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85071452890&doi=10.1007%2f978-3-030-27202-9_35&partnerID=40&md5=2689080f7b2410040a038f080ef93bfa},
doi = {10.1007/978-3-030-27202-9_35},
issn = {03029743},
year = {2019},
date = {2019-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {11662 LNCS},
pages = {388–396},
abstract = {Gaussian mixture models (GMMs) are widely used for image segmentation. The larger the number of components in the mixture, the higher the data likelihood. Unfortunately, too many GMM components lead to model overfitting and poor segmentation. Thus, there has been growing interest in GMM reduction algorithms that rely on component fusion while preserving the structure of the data. In this work, we present an algorithm based on a closed-form Cauchy-Schwarz divergence for GMM reduction. Contrary to previous GMM reduction techniques, which produce a single GMM, our approach can lead to multiple small GMMs that describe the structure of the data more accurately. Experiments on image foreground segmentation demonstrate the effectiveness of the proposed model compared to state-of-the-art methods. © Springer Nature Switzerland AG 2019.},
note = {ISBN: 9783030272012
Publisher: Springer Verlag},
keywords = {Cauchy-Schwarz divergence, Foreground segmentation, Gaussian distribution, Gaussian Mixture Model, Gaussian mixture reduction, Image analysis, Image segmentation, Mixture reductions, Reduction algorithms, Reduction techniques, State-of-art methods},
pubstate = {published},
tppubtype = {article}
}
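The reduction criterion in the entry above rests on a closed-form Cauchy-Schwarz divergence between Gaussians. As a point of reference, the sketch below evaluates the standard closed form for one pair of Gaussian components; the component parameters are invented, and this is not the authors' hierarchical reduction code, only the divergence it builds on.

```python
import numpy as np
from scipy.stats import multivariate_normal as mvn

def cs_divergence_gaussians(mu1, cov1, mu2, cov2):
    """Closed-form Cauchy-Schwarz divergence between two Gaussians.

    D_CS(p, q) = -log <p, q> + 0.5 * log <p, p> + 0.5 * log <q, q>,
    where <p, q> = integral of p(x) q(x) dx, which for Gaussians equals
    N(mu1; mu2, cov1 + cov2).
    """
    cross = mvn.pdf(mu1, mean=mu2, cov=cov1 + cov2)   # <p, q>
    self_p = mvn.pdf(mu1, mean=mu1, cov=2.0 * cov1)   # <p, p>
    self_q = mvn.pdf(mu2, mean=mu2, cov=2.0 * cov2)   # <q, q>
    return -np.log(cross) + 0.5 * (np.log(self_p) + np.log(self_q))

# Toy example with hypothetical component parameters.
mu_a, cov_a = np.array([0.0, 0.0]), np.eye(2)
mu_b, cov_b = np.array([1.0, 1.0]), 2.0 * np.eye(2)
print(cs_divergence_gaussians(mu_a, cov_a, mu_b, cov_b))
```

In a hierarchical reduction, the components or sub-mixtures with the smallest divergence would typically be merged first.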
Audet, F.; Allili, M. S.; Cretu, A. -M.
Salient object detection in images by combining objectness clues in the RGBD space Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 10317 LNCS, pp. 247–255, 2017, ISSN: 03029743, (ISBN: 9783319598758 Publisher: Springer Verlag).
@article{audet_salient_2017,
title = {Salient object detection in images by combining objectness clues in the RGBD space},
author = {F. Audet and M. S. Allili and A. -M. Cretu},
editor = {A. Campilho and F. Karray and F. Cheriet},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85022229105&doi=10.1007%2f978-3-319-59876-5_28&partnerID=40&md5=d78eb69cecd0a34ca2d517cfee44ef54},
doi = {10.1007/978-3-319-59876-5_28},
issn = {03029743},
year = {2017},
date = {2017-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {10317 LNCS},
pages = {247–255},
abstract = {We propose a multi-stage approach for salient object detection in natural images that incorporates color and depth information. In the first stage, the color and depth channels are explored separately through objectness-based measures to detect potential regions containing salient objects. This procedure produces a list of bounding boxes, which are further filtered and refined using statistical distributions. The final stage combines the retained candidates from the color and depth channels using a voting system that produces a final map narrowing down the location of the salient object. Experimental results on real-world images confirm the performance of the proposed method in comparison with the case where only color information is used. © Springer International Publishing AG 2017.},
note = {ISBN: 9783319598758
Publisher: Springer Verlag},
keywords = {Color, Color information, Depth information, Image analysis, Multistage approach, Object detection, Object recognition, Potential region, Real-world image, Salient object detection, Salient objects, Statistical distribution, Voting machines},
pubstate = {published},
tppubtype = {article}
}
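The final voting stage described in the abstract above lends itself to a small illustration: candidate bounding boxes retained from the color and depth channels cast votes into an accumulator map whose peak narrows down the salient object's location. The boxes, weights, and image size below are invented, and the earlier filtering stages are omitted.

```python
import numpy as np

def vote_map(shape, boxes, weights=None):
    """Accumulate bounding-box votes into a map narrowing the salient object.
    boxes: (x0, y0, x1, y1) candidates; weights: optional per-box confidence."""
    acc = np.zeros(shape, dtype=float)
    weights = weights if weights is not None else [1.0] * len(boxes)
    for (x0, y0, x1, y1), w in zip(boxes, weights):
        acc[y0:y1, x0:x1] += w
    return acc / max(acc.max(), 1e-12)          # normalize to [0, 1]

# Hypothetical retained candidates: two from the color channel, one from depth.
color_boxes = [(30, 40, 120, 160), (35, 45, 115, 150)]
depth_boxes = [(40, 50, 110, 155)]
votes = vote_map((200, 200), color_boxes + depth_boxes)
ys, xs = np.where(votes == votes.max())
print("peak region: rows", ys.min(), "-", ys.max(), ", cols", xs.min(), "-", xs.max())
```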
Filali, I.; Allili, M. S.; Benblidia, N.
Multi-graph based salient object detection Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 9730, pp. 318–324, 2016, ISSN: 03029743, (ISBN: 9783319415000 Publisher: Springer Verlag).
@article{filali_multi-graph_2016,
title = {Multi-graph based salient object detection},
author = {I. Filali and M. S. Allili and N. Benblidia},
editor = {F. Karray and A. Campilho},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978804496&doi=10.1007%2f978-3-319-41501-7_36&partnerID=40&md5=eb519756d2e72245e4131d5dc0b416b5},
doi = {10.1007/978-3-319-41501-7_36},
issn = {03029743},
year = {2016},
date = {2016-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {9730},
pages = {318–324},
abstract = {We propose a multi-layer graph-based approach for salient object detection in natural images. Starting from a multi-scale image decomposition into superpixels, we propose an objective function optimized on a multi-layer graph structure to diffuse saliency from image borders to salient objects. After isolating the object kernel, we enhance the accuracy of our saliency maps through an objectness-based refinement approach. Besides its simplicity, our algorithm yields very accurate salient objects with clear boundaries. Experiments show that our approach outperforms several recent methods for salient object detection. © Springer International Publishing Switzerland 2016.},
note = {ISBN: 9783319415000
Publisher: Springer Verlag},
keywords = {Graphic methods, Image analysis, Image segmentation, Multi-layer graphs, Multi-scale image decomposition, Multiscale segmentation, Natural images, Object detection, Object recognition, Objective functions, Saliency map, Salient object detection, Salient objects},
pubstate = {published},
tppubtype = {article}
}
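The border-to-object saliency diffusion mentioned in the abstract can be pictured with a standard graph diffusion on a superpixel affinity graph (manifold-ranking style, with the closed-form solution f = (I - alpha*S)^(-1) y). The paper's multi-layer objective differs from this single-layer toy, and the 4-node graph below is purely illustrative.

```python
import numpy as np

def diffuse_saliency(W, seed, alpha=0.9):
    """Graph diffusion f = (I - alpha * S)^-1 * seed, with S the symmetrically
    normalized affinity matrix of the superpixel graph."""
    d = np.maximum(W.sum(axis=1), 1e-12)
    D_inv_sqrt = np.diag(1.0 / np.sqrt(d))
    S = D_inv_sqrt @ W @ D_inv_sqrt
    return np.linalg.solve(np.eye(len(W)) - alpha * S, seed)

# Toy 4-node chain of superpixels; node 0 sits on the image border.
W = np.array([[0, 1, 0, 0],
              [1, 0, 1, 0],
              [0, 1, 0, 1],
              [0, 0, 1, 0]], dtype=float)
background = diffuse_saliency(W, seed=np.array([1.0, 0.0, 0.0, 0.0]))
saliency = 1.0 - background / background.max()   # far from the border = salient
print(saliency.round(3))
```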
Royer, J.; Blais, C.; Barnabé-Lortie, V.; Carré, M.; Leclerc, J.; Fiset, D.
Efficient visual information for unfamiliar face matching despite viewpoint variations: It's not in the eyes! Journal Article
In: Vision Research, vol. 123, pp. 33–40, 2016, ISSN: 00426989, (Publisher: Elsevier Ltd).
@article{royer_efficient_2016,
title = {Efficient visual information for unfamiliar face matching despite viewpoint variations: It's not in the eyes!},
author = {J. Royer and C. Blais and V. Barnabé-Lortie and M. Carré and J. Leclerc and D. Fiset},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84968779426&doi=10.1016%2fj.visres.2016.04.004&partnerID=40&md5=4c63f6eea279f7322c9af23ae9ed22c1},
doi = {10.1016/j.visres.2016.04.004},
issn = {00426989},
year = {2016},
date = {2016-01-01},
journal = {Vision Research},
volume = {123},
pages = {33–40},
abstract = {Faces are encountered from highly diverse angles in real-world settings. Despite this considerable diversity, most individuals are able to easily recognize familiar faces. The vast majority of studies in the field of face recognition have nonetheless focused almost exclusively on frontal views of faces. Indeed, a number of authors have investigated the diagnostic facial features for the recognition of frontal views of faces previously encoded in this same view. However, the nature of the information useful for identity matching when the encoded face and test face differ in viewing angle remains mostly unexplored. The present study addresses this issue using individual differences and bubbles, a method that pinpoints the facial features effectively used in a visual categorization task. Our results indicate that the use of features located in the center of the face (the lower left portion of the nose area and the center of the mouth) is significantly associated with individual efficiency in generalizing a face's identity across different viewpoints. However, as faces become more familiar, the reliance on this area decreases, while the diagnosticity of the eye region increases. This suggests that a certain distinction can be made between the visual mechanisms subtending viewpoint invariance and face recognition in the case of unfamiliar face identification. Our results further support the idea that the eye area may only come into play when the face stimulus is particularly familiar to the observer. © 2016 Elsevier Ltd.},
note = {Publisher: Elsevier Ltd},
keywords = {accuracy, adult, article, association, attention, Bubbles, Evoked Potentials, eye fixation, Face, face profile, face recognition, Facial Recognition, facies, female, Fixation, human, human experiment, Humans, Image analysis, Individual differences, male, Ocular, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, procedures, Psychophysics, recognition, Recognition (Psychology), regression analysis, task performance, unfamiliar face matching, viewpoint variation, Viewpoint variations, Visual, visual discrimination, visual evoked potential, visual information, visual memory, visual stimulation, visual system parameters, Young Adult},
pubstate = {published},
tppubtype = {article}
}
Allili, M. S.; Ziou, D.
Likelihood-based feature relevance for figure-ground segmentation in images and videos Journal Article
In: Neurocomputing, vol. 167, pp. 658–670, 2015, ISSN: 09252312, (Publisher: Elsevier).
@article{allili_likelihood-based_2015,
title = {Likelihood-based feature relevance for figure-ground segmentation in images and videos},
author = {M. S. Allili and D. Ziou},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84952631642&doi=10.1016%2fj.neucom.2015.04.015&partnerID=40&md5=833948d0784e0dc42c2245b9343971dd},
doi = {10.1016/j.neucom.2015.04.015},
issn = {09252312},
year = {2015},
date = {2015-01-01},
journal = {Neurocomputing},
volume = {167},
pages = {658–670},
abstract = {We propose an efficient method for image/video figure-ground segmentation using feature relevance (FR) and active contours. Given a set of positive and negative examples of a specific foreground (an object of interest (OOI) in an image or a tracked object in a video), we first learn the foreground distribution model and its characteristic features that best discriminate it from its contextual background. To this end, an objective function based on the feature likelihood ratio is proposed for supervised FR computation. FR is then incorporated into the foreground segmentation of new images and videos using level sets and energy minimization. We show the effectiveness of our approach on several examples of image/video figure-ground segmentation. © 2015 Elsevier B.V.},
note = {Publisher: Elsevier},
keywords = {accuracy, algorithm, article, calculation, Feature relevance, Figure-ground segmentations, Gaussian mixture model (GMMs), Image analysis, Image Enhancement, image quality, Image segmentation, Level Set, linear system, mathematical analysis, mathematical model, Negative examples, priority journal, Video cameras, videorecording},
pubstate = {published},
tppubtype = {article}
}
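A rough sketch of the feature-relevance idea from the entry above: each feature is scored by how much more likely the positive (foreground) examples are under a foreground model than under a background model fitted on that feature alone. The per-feature GMMs, the component count, and the normalization below are illustrative assumptions; the paper's objective function and its use inside level-set segmentation are not reproduced here.

```python
import numpy as np
from sklearn.mixture import GaussianMixture

def feature_relevance(pos, neg, n_components=3, seed=0):
    """Weight each feature by the mean log-likelihood ratio between a
    foreground GMM and a background GMM fitted on that feature alone."""
    scores = []
    for j in range(pos.shape[1]):
        fg = GaussianMixture(n_components, random_state=seed).fit(pos[:, [j]])
        bg = GaussianMixture(n_components, random_state=seed).fit(neg[:, [j]])
        # How much more likely the positive samples are under the foreground
        # model than under the background model, clipped at zero.
        scores.append(max(fg.score(pos[:, [j]]) - bg.score(pos[:, [j]]), 0.0))
    scores = np.asarray(scores)
    return scores / scores.sum() if scores.sum() > 0 else scores

# Toy data: only the second feature separates foreground from background.
rng = np.random.default_rng(0)
pos = np.c_[rng.normal(0, 1, 500), rng.normal(5, 1, 500)]
neg = np.c_[rng.normal(0, 1, 500), rng.normal(0, 1, 500)]
print(feature_relevance(pos, neg))     # most of the weight on feature 1
```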
Pedrocca, P. J.; Allili, M. S.
Real-time people detection in videos using geometrical features and adaptive boosting Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 6753 LNCS, no. PART 1, pp. 314–324, 2011, ISSN: 03029743, (ISBN: 9783642215926 Place: Burnaby, BC).
@article{pedrocca_real-time_2011,
title = {Real-time people detection in videos using geometrical features and adaptive boosting},
author = {P. J. Pedrocca and M. S. Allili},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79960336661&doi=10.1007%2f978-3-642-21593-3_32&partnerID=40&md5=47ca975800e68648e02f76eba89a7457},
doi = {10.1007/978-3-642-21593-3_32},
issn = {03029743},
year = {2011},
date = {2011-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {6753 LNCS},
number = {PART 1},
pages = {314–324},
abstract = {In this paper, we propose a new approach for detecting people in video sequences based on geometrical features and AdaBoost learning. Unlike its predecessors, our approach uses features calculated directly from silhouettes produced by change detection algorithms. Moreover, feature analysis is done part by part for each silhouette, making our approach efficiently applicable to detecting partially occluded pedestrians and groups of people. Experiments on real-world videos demonstrate the performance of the proposed approach for real-time pedestrian detection. © 2011 Springer-Verlag.},
note = {ISBN: 9783642215926
Place: Burnaby, BC},
keywords = {Adaboost learning, Adaptive boosting, Change detection algorithms, Feature analysis, Feature extraction, Geometrical features, Geometry, Image analysis, Object recognition, Pedestrian detection, People detection, Real world videos, Signal detection, Video sequences},
pubstate = {published},
tppubtype = {article}
}
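The pipeline in the abstract above (geometric features computed from change-detection silhouettes, fed to AdaBoost) can be sketched as follows. The specific feature set (bounding-box aspect ratio, fill ratio, Hu moments), the toy silhouettes, and the classifier settings are illustrative choices, not the paper's exact ones.

```python
import numpy as np
import cv2
from sklearn.ensemble import AdaBoostClassifier

def silhouette_features(mask):
    """Geometric descriptors of a binary (0/255) silhouette: bounding-box
    aspect ratio, fill ratio, and the seven Hu moments."""
    x, y, w, h = cv2.boundingRect(mask)
    aspect = w / float(h)
    fill = np.count_nonzero(mask) / float(w * h)
    hu = cv2.HuMoments(cv2.moments(mask, binaryImage=True)).ravel()
    return np.r_[aspect, fill, hu]

def toy_mask(w, h, size=64):
    """Hypothetical silhouette: a filled rectangle of width w and height h."""
    m = np.zeros((size, size), np.uint8)
    cv2.rectangle(m, (10, 10), (10 + int(w), 10 + int(h)), 255, -1)
    return m

rng = np.random.default_rng(0)
people = [toy_mask(10 + rng.integers(4), 40 + rng.integers(8)) for _ in range(50)]
others = [toy_mask(40 + rng.integers(8), 10 + rng.integers(4)) for _ in range(50)]
X = np.array([silhouette_features(m) for m in people + others])
y = np.r_[np.ones(50), np.zeros(50)]

clf = AdaBoostClassifier(n_estimators=50).fit(X, y)   # boosted decision stumps
print("training accuracy:", clf.score(X, y))
```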
Allili, M. S.; Baaziz, N.
Contourlet-based texture retrieval using a mixture of generalized Gaussian distributions Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 6855 LNCS, no. PART 2, pp. 446–454, 2011, ISSN: 03029743, (ISBN: 9783642236778 Place: Seville).
@article{allili_contourlet-based_2011,
title = {Contourlet-based texture retrieval using a mixture of generalized Gaussian distributions},
author = {M. S. Allili and N. Baaziz},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80052796353&doi=10.1007%2f978-3-642-23678-5_53&partnerID=40&md5=fde8aaeea1609c81747b0ab27a8c78ce},
doi = {10.1007/978-3-642-23678-5_53},
issn = {03029743},
year = {2011},
date = {2011-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {6855 LNCS},
number = {PART 2},
pages = {446–454},
abstract = {We address the texture retrieval problem using a contourlet-based statistical representation. We propose a new contourlet distribution model using finite mixtures of generalized Gaussian distributions (MoGG). The MoGG captures a wide range of contourlet histogram shapes, which provides better description and discrimination of textures than single probability density functions (pdfs). We propose a model similarity measure based on a Kullback-Leibler divergence (KLD) approximation using Monte-Carlo sampling. We show that our approach, using a redundant contourlet transform, yields better texture discrimination and retrieval results than other methods of statistical wavelet/contourlet modelling. © 2011 Springer-Verlag.},
note = {ISBN: 9783642236778
Place: Seville},
keywords = {Contourlet transform, Contourlets, Distribution modelling, Finite mixtures, Gaussian distribution, Generalized Gaussian Distributions, Image analysis, Kullback-Leibler divergence, Mixtures, Monte-Carlo sampling, Probability density function, Similarity measure, Statistical representations, Texture discrimination, Texture retrieval, Textures},
pubstate = {published},
tppubtype = {article}
}
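The Kullback-Leibler divergence between two MoGG models has no closed form, which is why the entry above approximates it by Monte-Carlo sampling. A minimal univariate sketch of that approximation follows, with made-up mixture parameters; in practice the mixtures would be fitted to contourlet subband coefficients.

```python
import numpy as np
from scipy.stats import gennorm

def mogg_pdf(x, weights, betas, locs, scales):
    """Density of a univariate mixture of generalized Gaussians (MoGG)."""
    return sum(w * gennorm.pdf(np.asarray(x), b, loc=m, scale=s)
               for w, b, m, s in zip(weights, betas, locs, scales))

def mogg_sample(n, weights, betas, locs, scales, rng):
    """Draw n samples: pick a component, then sample its generalized Gaussian."""
    counts = rng.multinomial(n, weights)
    parts = [gennorm.rvs(b, loc=m, scale=s, size=c, random_state=rng)
             for b, m, s, c in zip(betas, locs, scales, counts)]
    return np.concatenate(parts)

def kld_monte_carlo(p, q, n=20000, rng=None):
    """KL(p || q) ~= mean over x ~ p of [log p(x) - log q(x)]."""
    rng = rng or np.random.default_rng(0)
    x = mogg_sample(n, *p, rng)
    return float(np.mean(np.log(mogg_pdf(x, *p)) - np.log(mogg_pdf(x, *q))))

# Two hypothetical 2-component models: (weights, shapes beta, locations, scales).
p = ([0.6, 0.4], [1.5, 0.8], [0.0, 3.0], [1.0, 0.5])
q = ([0.5, 0.5], [2.0, 1.0], [0.2, 2.5], [1.2, 0.6])
print(kld_monte_carlo(p, q))
```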
Ziou, D.; Bouguila, N.; Allili, M. S.; El-Zaart, A.
Finite Gamma mixture modelling using minimum message length inference: Application to SAR image analysis Journal Article
In: International Journal of Remote Sensing, vol. 30, no. 3, pp. 771–792, 2009, ISSN: 01431161, (Publisher: Taylor and Francis Ltd.).
@article{ziou_finite_2009,
title = {Finite Gamma mixture modelling using minimum message length inference: Application to SAR image analysis},
author = {D. Ziou and N. Bouguila and M. S. Allili and A. El-Zaart},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-67650686123&doi=10.1080%2f01431160802392646&partnerID=40&md5=901ea39ad806dcb62cd630585469af60},
doi = {10.1080/01431160802392646},
issn = {01431161},
year = {2009},
date = {2009-01-01},
journal = {International Journal of Remote Sensing},
volume = {30},
number = {3},
pages = {771–792},
abstract = {This paper discusses the unsupervised learning problem for finite mixtures of Gamma distributions. An important part of this problem is determining the number of clusters which best describes a set of data. We apply the Minimum Message Length (MML) criterion to the unsupervised learning problem in the case of finite mixtures of Gamma distributions. The MML and other criteria in the literature are compared in terms of their ability to estimate the number of clusters in a data set. The comparison utilizes synthetic and RADARSAT SAR images. The performance of our method is also tested by contextual evaluations involving SAR image segmentation and change detection.},
note = {Publisher: Taylor and Francis Ltd.},
keywords = {Change detection, Determining the number of clusters, estimation method, finite element method, Finite mixtures, Gamma distribution, Gamma mixtures, Image analysis, Image processing, Image segmentation, Minimum message lengths, Mixtures, Number of clusters, numerical model, Probability distributions, Radar imaging, SAR image segmentation, Synthetic aperture radar, Unsupervised learning},
pubstate = {published},
tppubtype = {article}
}
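A compact illustration of fitting a finite Gamma mixture with EM and comparing candidate numbers of clusters. The M-step below uses moment matching for the shape and scale parameters, and the model-selection penalty is an ordinary BIC stand-in rather than the Minimum Message Length criterion derived in the paper.

```python
import numpy as np
from scipy.stats import gamma

def gamma_mixture_em(x, k, n_iter=200, seed=0):
    """EM for a k-component Gamma mixture on positive data x.
    M-step: moment matching, shape = m^2 / v, scale = v / m."""
    rng = np.random.default_rng(seed)
    w = np.full(k, 1.0 / k)
    shape = np.full(k, 2.0)
    scale = rng.uniform(0.5, 2.0, k) * x.mean() / 2.0
    for _ in range(n_iter):
        dens = np.stack([wk * gamma.pdf(x, a, scale=s)
                         for wk, a, s in zip(w, shape, scale)], axis=1)
        r = dens / dens.sum(axis=1, keepdims=True)        # E-step
        nk = r.sum(axis=0)
        w = nk / len(x)                                   # M-step
        m = (r * x[:, None]).sum(axis=0) / nk
        v = (r * (x[:, None] - m) ** 2).sum(axis=0) / nk
        shape, scale = m ** 2 / v, v / m
    dens = np.stack([wk * gamma.pdf(x, a, scale=s)
                     for wk, a, s in zip(w, shape, scale)], axis=1)
    return w, shape, scale, np.log(dens.sum(axis=1)).sum()

rng = np.random.default_rng(1)
x = np.r_[rng.gamma(2.0, 1.0, 600), rng.gamma(9.0, 1.0, 400)]
for k in (1, 2, 3):
    *_, loglik = gamma_mixture_em(x, k)
    n_params = 3 * k - 1                  # (k-1) weights + k shapes + k scales
    bic = n_params * np.log(len(x)) - 2 * loglik
    print(k, round(bic, 1))               # smallest value selects k
```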
Fiset, D.; Blais, C.; Arguin, M.; Tadros, K.; Éthier-Majcher, C.; Bub, D.; Gosselin, F.
The spatio-temporal dynamics of visual letter recognition Journal Article
In: Cognitive Neuropsychology, vol. 26, no. 1, pp. 23–35, 2009, ISSN: 02643294.
@article{fiset_spatio-temporal_2009,
title = {The spatio-temporal dynamics of visual letter recognition},
author = {D. Fiset and C. Blais and M. Arguin and K. Tadros and C. Éthier-Majcher and D. Bub and F. Gosselin},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-67649525418&doi=10.1080%2f02643290802421160&partnerID=40&md5=bca7bda93d59994f2679faff9d93f46a},
doi = {10.1080/02643290802421160},
issn = {02643294},
year = {2009},
date = {2009-01-01},
journal = {Cognitive Neuropsychology},
volume = {26},
number = {1},
pages = {23–35},
abstract = {We applied the Bubbles technique to reveal directly the spatio-temporal features of uppercase Arial letter identification. We asked four normal readers to each identify 26,000 letters that were randomly sampled in space and time; afterwards, we performed multiple linear regressions on the participant's response accuracy and the space-time samples. We contend that each cluster of connected significant regression coefficients is a letter feature. To bridge the gap between the letter identification literature and this experiment, we also determined the relative importance of the features proposed in the letter identification literature. Results show clear modulations of the relative importance of the letter features of some letters across time, demonstrating that letter features are not always extracted simultaneously at constant speeds. Furthermore, of all the feature classes proposed in the literature, line terminations and horizontals appear to be the two most important for letter identification. © 2008 Psychology Press, an imprint of the Taylor & Francis Group.},
keywords = {article, bootstrapping, Bubbles technique, Discrimination Learning, human, Humans, Image analysis, linear regression analysis, methodology, Models, Nonlinear Dynamics, nonlinear system, Pattern Recognition, Photic Stimulation, photostimulation, physiology, priority journal, Psychological, psychological model, reaction time, recognition, Recognition (Psychology), task performance, temporal summation, time, Time Factors, Visual, word recognition},
pubstate = {published},
tppubtype = {article}
}
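The core computation behind a Bubbles analysis, as described in the abstract above, is a regression of trial-by-trial accuracy onto the random samples, yielding a classification image of the diagnostic regions. The simulation below is entirely invented (observer, stimulus size, bubble parameters, diagnostic location) and uses a correlation-style weighted sum on purely spatial samples; the paper works with space-time samples and proper multiple linear regressions.

```python
import numpy as np

rng = np.random.default_rng(0)
size, n_trials, n_bubbles, sigma = 32, 5000, 10, 2.0
yy, xx = np.mgrid[0:size, 0:size]

def bubble(cy, cx):
    """Gaussian aperture ('bubble') centred at (cy, cx)."""
    return np.exp(-((yy - cy) ** 2 + (xx - cx) ** 2) / (2 * sigma ** 2))

# Hypothetical diagnostic region the simulated observer relies on.
diagnostic = bubble(20, 12) > 0.5

masks = np.empty((n_trials, size, size))
correct = np.empty(n_trials)
for t in range(n_trials):
    centers = rng.integers(0, size, size=(n_bubbles, 2))
    masks[t] = np.clip(sum(bubble(cy, cx) for cy, cx in centers), 0, 1)
    revealed = masks[t][diagnostic].mean()          # how much of it is visible
    correct[t] = rng.random() < 0.25 + 0.75 * revealed

# Classification image: correlation of accuracy with the sampling masks.
z = (correct - correct.mean()) / correct.std()
classification_image = np.tensordot(z, masks, axes=1) / n_trials
peak = np.unravel_index(classification_image.argmax(), classification_image.shape)
print("recovered diagnostic location:", peak)       # expected near (20, 12)
```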
Allili, M. S.; Ziou, D.
Object tracking in videos using adaptive mixture models and active contours Journal Article
In: Neurocomputing, vol. 71, no. 10-12, pp. 2001–2011, 2008, ISSN: 09252312.
@article{allili_object_2008,
title = {Object tracking in videos using adaptive mixture models and active contours},
author = {M. S. Allili and D. Ziou},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-44649197137&doi=10.1016%2fj.neucom.2007.10.019&partnerID=40&md5=a2aef677fae1b220f68c9fd720be3fd5},
doi = {10.1016/j.neucom.2007.10.019},
issn = {09252312},
year = {2008},
date = {2008-01-01},
journal = {Neurocomputing},
volume = {71},
number = {10-12},
pages = {2001–2011},
abstract = {In this paper, we propose a novel object tracking algorithm for video sequences, based on active contours. The tracking is based on matching the object appearance model between successive frames of the sequence using active contours. We formulate the tracking as a minimization of an objective function incorporating region, boundary and shape information. Further, in order to handle variation in object appearance due to self-shadowing, changing illumination conditions and camera geometry, we propose an adaptive mixture model for the object representation. The implementation of the method is based on the level set method. We validate our approach on tracking examples using real video sequences, with comparison to two recent state-of-the-art methods. © 2008 Elsevier B.V. All rights reserved.},
keywords = {Active contours, algorithm, Algorithms, article, controlled study, Image analysis, Image processing, imaging system, Level set method, Mathematical models, motion analysis system, Object recognition, priority journal, Set theory, statistical model, Video cameras, Video sequences, videorecording, visual information},
pubstate = {published},
tppubtype = {article}
}
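The adaptive mixture model in the entry above lets the tracked object's appearance drift as illumination and viewing geometry change between frames. The sketch below uses a generic Stauffer-Grimson-style recursive update of a 1-D grey-level mixture and leaves out the level-set energy minimization the paper couples it with; all parameters are illustrative.

```python
import numpy as np

class AdaptiveGaussianMixture:
    """Online appearance model: each new sample updates the mixture
    component it best matches, with a small exponential-forgetting rate."""

    def __init__(self, means, variances, weights, lr=0.05):
        self.mu = np.asarray(means, float)
        self.var = np.asarray(variances, float)
        self.w = np.asarray(weights, float)
        self.lr = lr

    def update(self, samples):
        for x in np.asarray(samples, float):
            lik = np.exp(-0.5 * (x - self.mu) ** 2 / self.var) / np.sqrt(self.var)
            k = int(np.argmax(self.w * lik))        # best-matching component
            self.w = (1 - self.lr) * self.w
            self.w[k] += self.lr                    # reinforce the matched one
            self.mu[k] = (1 - self.lr) * self.mu[k] + self.lr * x
            self.var[k] = (1 - self.lr) * self.var[k] + self.lr * (x - self.mu[k]) ** 2
            self.w /= self.w.sum()

# Hypothetical grey-level appearance of a tracked object over two frames,
# the second one brighter (e.g. an illumination change).
model = AdaptiveGaussianMixture(means=[60, 120], variances=[100, 100],
                                weights=[0.5, 0.5])
model.update(np.random.default_rng(0).normal(70, 8, 200))    # frame t
model.update(np.random.default_rng(1).normal(80, 8, 200))    # frame t + 1
print(model.mu.round(1), model.w.round(2))
```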