

Audet, F.; Allili, M. S.; Cretu, A. -M.
Salient object detection in images by combining objectness clues in the RGBD space Journal article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 10317 LNCS, pp. 247–255, 2017, ISSN: 0302-9743, (ISBN: 9783319598758, Publisher: Springer Verlag).
@article{audet_salient_2017,
title = {Salient object detection in images by combining objectness clues in the RGBD space},
author = {F. Audet and M. S. Allili and A. -M. Cretu},
editor = {A. Campilho and F. Karray and F. Cheriet},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85022229105&doi=10.1007%2f978-3-319-59876-5_28&partnerID=40&md5=d78eb69cecd0a34ca2d517cfee44ef54},
doi = {10.1007/978-3-319-59876-5_28},
issn = {0302-9743},
year = {2017},
date = {2017-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {10317 LNCS},
pages = {247–255},
abstract = {We propose a multi-stage approach for salient object detection in natural images which incorporates color and depth information. In the first stage, color and depth channels are explored separately through objectness-based measures to detect potential regions containing salient objects. This procedure produces a list of bounding boxes which are further filtered and refined using statistical distributions. The retained candidates from both color and depth channels are then combined using a voting system. The final stage consists of combining the extracted candidates from color and depth channels using a voting system that produces a final map narrowing the location of the salient object. Experimental results on real-world images have proved the performance of the proposed method in comparison with the case where only color information is used. © Springer International Publishing AG 2017.},
note = {ISBN: 9783319598758; Publisher: Springer Verlag},
keywords = {Color, Color information, Depth information, Image analysis, Multistage approach, Object detection, Object recognition, Potential region, Real-world image, Salient object detection, Salient objects, Statistical distribution, Voting machines},
pubstate = {published},
tppubtype = {article}
}
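The Audet et al. (2017) entry above fuses bounding-box candidates found separately in the color and depth channels through a voting scheme. As a loose illustration of that idea only (not the authors' implementation), the Python sketch below accumulates votes from hypothetical candidate boxes into a 2-D map whose peak narrows down the object location; the box lists, scores and image size are invented.

import numpy as np

def vote_map(boxes, image_shape):
    """Accumulate bounding-box votes into a 2-D map.

    boxes: list of (x0, y0, x1, y1, score) candidates from one channel.
    image_shape: (height, width) of the image.
    """
    h, w = image_shape
    votes = np.zeros((h, w), dtype=float)
    for x0, y0, x1, y1, score in boxes:
        votes[y0:y1, x0:x1] += score   # each box votes inside its extent
    return votes

# Hypothetical candidates kept after filtering in each channel.
color_boxes = [(30, 40, 120, 160, 0.9), (35, 50, 110, 150, 0.7)]
depth_boxes = [(28, 45, 118, 155, 0.8)]

# Combine the two channels; the peak of the fused map narrows down
# the most likely location of the salient object.
fused = vote_map(color_boxes, (240, 320)) + vote_map(depth_boxes, (240, 320))
peak = np.unravel_index(np.argmax(fused), fused.shape)
print("most-voted pixel (row, col):", peak)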
Filali, I.; Allili, M. S.; Benblidia, N.
Multi-graph based salient object detection Journal article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 9730, pp. 318–324, 2016, ISSN: 0302-9743, (ISBN: 9783319415000, Publisher: Springer Verlag).
@article{filali_multi-graph_2016,
title = {Multi-graph based salient object detection},
author = {I. Filali and M. S. Allili and N. Benblidia},
editor = {F. Karray and A. Campilho},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978804496&doi=10.1007%2f978-3-319-41501-7_36&partnerID=40&md5=eb519756d2e72245e4131d5dc0b416b5},
doi = {10.1007/978-3-319-41501-7_36},
issn = {0302-9743},
year = {2016},
date = {2016-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {9730},
pages = {318–324},
abstract = {We propose a multi-layer graph based approach for salient object detection in natural images. Starting from a set of multi-scale image decomposition using superpixels, we propose an objective function optimized on a multi-layer graph structure to diffuse saliency from image borders to salient objects. After isolating the object kernel, we enhance the accuracy of our saliency maps through an objectness-like based refinement approach. Beside its simplicity, our algorithm yields very accurate salient objects with clear boundaries. Experiments have shown that our approach outperforms several recent methods dealing with salient object detection. © Springer International Publishing Switzerland 2016.},
note = {ISBN: 9783319415000; Publisher: Springer Verlag},
keywords = {Graphic methods, Image analysis, Image segmentation, Multi-layer graphs, Multi-scale image decomposition, Multiscale segmentation, Natural images, Object detection, Object recognition, Objective functions, Saliency map, Salient object detection, Salient objects},
pubstate = {published},
tppubtype = {article}
}
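The multi-layer graph model in the Filali et al. (2016) entry above diffuses saliency from the image borders toward the object by optimizing a function on a superpixel graph. The sketch below uses a generic closed-form graph-ranking step, f = (I - alpha S)^(-1) y with a normalized affinity S and border-seeded queries y, as a stand-in for the paper's exact objective; the superpixel descriptors and adjacency are made up.

import numpy as np

def graph_ranking(features, alpha=0.9, sigma=0.1):
    """Generic manifold-ranking matrix on a fully connected graph of superpixels."""
    # Affinity between superpixel descriptors (e.g. mean colors).
    d2 = np.sum((features[:, None, :] - features[None, :, :]) ** 2, axis=-1)
    W = np.exp(-d2 / (2.0 * sigma ** 2))
    np.fill_diagonal(W, 0.0)
    # Symmetric normalization S = D^{-1/2} W D^{-1/2}.
    d = W.sum(axis=1)
    Dinv = np.diag(1.0 / np.sqrt(np.maximum(d, 1e-12)))
    S = Dinv @ W @ Dinv
    return np.linalg.inv(np.eye(len(features)) - alpha * S)

# Hypothetical mean-color descriptors for 5 superpixels; the last two touch the border.
feats = np.array([[0.2, 0.2, 0.8], [0.25, 0.2, 0.75], [0.22, 0.18, 0.78],
                  [0.9, 0.9, 0.9], [0.85, 0.92, 0.88]])
A = graph_ranking(feats)
y_border = np.array([0., 0., 0., 1., 1.])      # border superpixels as queries
background_score = A @ y_border                # similarity to the (background) border
saliency = 1.0 - background_score / background_score.max()
print(np.round(saliency, 2))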
Filali, I.; Allili, M. S.; Benblidia, N.
Multi-scale salient object detection using graph ranking and global–local saliency refinement Journal article
In: Signal Processing: Image Communication, vol. 47, pp. 380–401, 2016, ISSN: 0923-5965, (Publisher: Elsevier B.V.).
@article{filali_multi-scale_2016,
title = {Multi-scale salient object detection using graph ranking and global–local saliency refinement},
author = {I. Filali and M. S. Allili and N. Benblidia},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84982091007&doi=10.1016%2fj.image.2016.07.007&partnerID=40&md5=60dabe68b5cff4b5d00216d6a632e1cd},
doi = {10.1016/j.image.2016.07.007},
issn = {0923-5965},
year = {2016},
date = {2016-01-01},
journal = {Signal Processing: Image Communication},
volume = {47},
pages = {380–401},
abstract = {We propose an algorithm for salient object detection (SOD) based on multi-scale graph ranking and iterative local–global object refinement. Starting from a set of multi-scale image decompositions using superpixels, we propose an objective function which is optimized on a multi-layer graph structure to diffuse saliency from image borders to salient objects. This step aims at roughly estimating the location and extent of salient objects in the image. We then enhance the object saliency through an iterative process employing random forests and local boundary refinement using color, texture and edge information. We also use a feature weighting scheme to ensure optimal object/background discrimination. Our algorithm yields very accurate saliency maps for SOD while maintaining a reasonable computational time. Experiments on several standard datasets have shown that our approach outperforms several recent methods dealing with SOD. © 2016 Elsevier B.V.},
note = {Publisher: Elsevier B.V.},
keywords = {Algorithms, Boundary information, Decision trees, Feature relevance, Iterative methods, Multi-layer graphs, Object detection, Object recognition, Random forests, Salient object detection},
pubstate = {published},
tppubtype = {article}
}
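The refinement stage described in the abstract above re-learns from confident superpixels to sharpen the coarse saliency estimate. The sketch below shows one plausible version of such a pass using scikit-learn's RandomForestClassifier; the per-superpixel features, the coarse scores and the confidence thresholds are all assumptions, not the published pipeline.

import numpy as np
from sklearn.ensemble import RandomForestClassifier

def refine_saliency(features, coarse_saliency, hi=0.8, lo=0.2):
    """One refinement pass: learn a forest on confident superpixels, re-score the rest."""
    pos = coarse_saliency >= hi          # confident object superpixels
    neg = coarse_saliency <= lo          # confident background superpixels
    if pos.sum() == 0 or neg.sum() == 0:
        return coarse_saliency           # nothing confident enough to learn from
    X = np.vstack([features[pos], features[neg]])
    y = np.concatenate([np.ones(pos.sum()), np.zeros(neg.sum())])
    forest = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
    return forest.predict_proba(features)[:, 1]   # refined per-superpixel saliency

# Hypothetical per-superpixel features (color/texture/edge stats) and coarse scores.
rng = np.random.default_rng(0)
feats = rng.normal(size=(50, 6))
coarse = rng.uniform(size=50)
print(np.round(refine_saliency(feats, coarse)[:5], 2))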
Larivière, G.; Allili, M. S.
A learning probabilistic approach for object segmentation Proceedings article
In: Proceedings of the 2012 9th Conference on Computer and Robot Vision, CRV 2012, pp. 86–93, Toronto, ON, 2012, ISBN: 978-076954683-4, (Journal Abbreviation: Proc. Conf. Comput. Rob. Vis., CRV).
@inproceedings{lariviere_learning_2012,
title = {A learning probabilistic approach for object segmentation},
author = {G. Larivière and M. S. Allili},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84878376248&doi=10.1109%2fCRV.2012.19&partnerID=40&md5=044a531d9d6de8036a434993f7b5d7ba},
doi = {10.1109/CRV.2012.19},
isbn = {978-076954683-4},
year = {2012},
date = {2012-01-01},
booktitle = {Proceedings of the 2012 9th Conference on Computer and Robot Vision, CRV 2012},
pages = {86–93},
address = {Toronto, ON},
abstract = {This paper proposes a new method for figure-ground image segmentation based on a probabilistic learning approach of the object shape. Historically, segmentation is mostly defined as a data-driven bottom-up process, where pixels are grouped into regions/objects according to objective criteria, such as region homogeneity, etc. In particular, it aims at creating a partition of the image into contiguous, homogenous regions. In the proposed work, we propose to incorporate prior knowledge about the object shape and category to segment the object from the background. The segmentation process is composed of two parts. In the first part, object shape models are built using sets of object fragments. The second part starts by first segmenting an image into homogenous regions using the mean-shift algorithm. Then, several object hypotheses are tested and validated using the different object shape models as supporting information. As an output, our algorithm identifies the object category, position, as well as its optimal segmentation. Experimental results show the capacity of the approach to segment several object categories. © 2012 IEEE.},
note = {Journal Abbreviation: Proc. Conf. Comput. Rob. Vis., CRV},
keywords = {Algorithms, Computer vision, fragments, Image segmentation, Mean shift algorithm, mean-shift algorithm, Object recognition, Object segmentation, Object shape, Optimal segmentation, Probabilistic approaches, Probabilistic Learning, Segmentation process},
pubstate = {published},
tppubtype = {inproceedings}
}
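The segmentation stage of Larivière and Allili (2012) starts from a mean-shift over-segmentation of the image before object hypotheses are tested against the fragment-based shape models. A minimal stand-in for that first step, clustering joint color and position features with scikit-learn's MeanShift (the bandwidth and spatial weighting are arbitrary choices, not the authors' settings), is:

import numpy as np
from sklearn.cluster import MeanShift

def mean_shift_regions(image, bandwidth=0.2, spatial_weight=0.5):
    """Group pixels into homogeneous regions with mean shift on (color, position)."""
    h, w, _ = image.shape
    ys, xs = np.mgrid[0:h, 0:w]
    # Joint feature: color plus weighted normalized coordinates.
    feats = np.column_stack([
        image.reshape(-1, 3),
        spatial_weight * ys.ravel() / h,
        spatial_weight * xs.ravel() / w,
    ])
    labels = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit_predict(feats)
    return labels.reshape(h, w)

# A tiny synthetic image: dark left half, bright right half.
img = np.zeros((20, 20, 3))
img[:, 10:] = 1.0
regions = mean_shift_regions(img)
print("number of regions:", regions.max() + 1)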
Pedrocca, P. J.; Allili, M. S.
Real-time people detection in videos using geometrical features and adaptive boosting Journal article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 6753 LNCS, no. PART 1, pp. 314–324, 2011, ISSN: 0302-9743, (ISBN: 9783642215926, Place: Burnaby, BC).
@article{pedrocca_real-time_2011,
title = {Real-time people detection in videos using geometrical features and adaptive boosting},
author = {P. J. Pedrocca and M. S. Allili},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79960336661&doi=10.1007%2f978-3-642-21593-3_32&partnerID=40&md5=47ca975800e68648e02f76eba89a7457},
doi = {10.1007/978-3-642-21593-3_32},
issn = {0302-9743},
year = {2011},
date = {2011-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {6753 LNCS},
number = {PART 1},
pages = {314–324},
abstract = {In this paper, we propose a new approach for detecting people in video sequences based on geometrical features and AdaBoost learning. Unlike its predecessors, our approach uses features calculated directly from silhouettes produced by change detection algorithms. Moreover, feature analysis is done part by part for each silhouette, making our approach efficiently applicable for partially-occluded pedestrians and groups of people detection. Experiments on real-world videos showed us the performance of the proposed approach for real-time pedestrian detection. © 2011 Springer-Verlag.},
note = {ISBN: 9783642215926; Place: Burnaby, BC},
keywords = {Adaboost learning, Adaptive boosting, Change detection algorithms, Feature analysis, Feature extraction, Geometrical features, Geometry, Image analysis, Object recognition, Pedestrian detection, People detection, Real world videos, Signal detection, Video sequences},
pubstate = {published},
tppubtype = {article}
}
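Pedrocca and Allili (2011) classify silhouette parts with AdaBoost over geometrical features computed from change-detection silhouettes. The toy sketch below trains scikit-learn's AdaBoostClassifier on a few simple silhouette descriptors; both the feature set and the synthetic tall/wide masks are placeholders rather than the features used in the paper.

import numpy as np
from sklearn.ensemble import AdaBoostClassifier

def silhouette_features(mask):
    """Simple geometric descriptors of a binary silhouette (a stand-in feature set)."""
    ys, xs = np.nonzero(mask)
    h = ys.max() - ys.min() + 1
    w = xs.max() - xs.min() + 1
    area = mask.sum()
    return np.array([
        h / w,                      # aspect ratio
        area / (h * w),             # extent (fill ratio of the bounding box)
        ys.mean() / mask.shape[0],  # normalized vertical centroid
    ])

# Synthetic "person-like" (tall) and "non-person" (wide) silhouettes.
rng = np.random.default_rng(1)
def make_mask(tall):
    m = np.zeros((60, 60), dtype=bool)
    hh, ww = (40, 12) if tall else (12, 40)
    y0, x0 = rng.integers(0, 20, size=2)
    m[y0:y0 + hh, x0:x0 + ww] = True
    return m

X = np.array([silhouette_features(make_mask(tall)) for tall in [True, False] * 50])
y = np.array([1, 0] * 50)
clf = AdaBoostClassifier(n_estimators=50, random_state=0).fit(X, y)
print("training accuracy:", clf.score(X, y))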
Allili, M. S.; Ziou, D.
Object tracking in videos using adaptive mixture models and active contours Journal article
In: Neurocomputing, vol. 71, no. 10-12, pp. 2001–2011, 2008, ISSN: 0925-2312.
@article{allili_object_2008,
title = {Object tracking in videos using adaptive mixture models and active contours},
author = {M. S. Allili and D. Ziou},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-44649197137&doi=10.1016%2fj.neucom.2007.10.019&partnerID=40&md5=a2aef677fae1b220f68c9fd720be3fd5},
doi = {10.1016/j.neucom.2007.10.019},
issn = {0925-2312},
year = {2008},
date = {2008-01-01},
journal = {Neurocomputing},
volume = {71},
number = {10-12},
pages = {2001–2011},
abstract = {In this paper, we propose a novel object tracking algorithm for video sequences, based on active contours. The tracking is based on matching the object appearance model between successive frames of the sequence using active contours. We formulate the tracking as a minimization of an objective function incorporating region, boundary and shape information. Further, in order to handle variation in object appearance due to self-shadowing, changing illumination conditions and camera geometry, we propose an adaptive mixture model for the object representation. The implementation of the method is based on the level set method. We validate our approach on tracking examples using real video sequences, with comparison to two recent state-of-the-art methods. © 2008 Elsevier B.V. All rights reserved.},
keywords = {Active contours, algorithm, Algorithms, article, controlled study, Image analysis, Image processing, imaging system, Level set method, Mathematical models, motion analysis system, Object recognition, priority journal, Set theory, statistical model, Video cameras, Video sequences, videorecording, visual information},
pubstate = {published},
tppubtype = {article}
}
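The adaptive mixture model in Allili and Ziou (2008) lets the object appearance model follow shading and illumination changes from frame to frame. The sketch below is one possible online adaptation rule (diagonal covariances, fixed learning rate, synthetic pixel samples) and is not the update actually derived in the paper.

import numpy as np

class AdaptiveGaussianMixture:
    """Diagonal-covariance Gaussian mixture with exponential forgetting."""

    def __init__(self, means, variances, weights, lr=0.05):
        self.means = np.asarray(means, float)        # (K, D)
        self.vars = np.asarray(variances, float)     # (K, D)
        self.weights = np.asarray(weights, float)    # (K,)
        self.lr = lr

    def responsibilities(self, x):
        # Diagonal Gaussian densities weighted by the mixture proportions.
        diff2 = (x - self.means) ** 2 / self.vars
        dens = np.exp(-0.5 * diff2.sum(axis=1)) / np.sqrt(np.prod(2 * np.pi * self.vars, axis=1))
        r = self.weights * dens
        return r / (r.sum() + 1e-300)

    def update(self, pixels):
        """Adapt the mixture to the object pixels observed in the current frame."""
        for x in pixels:
            r = self.responsibilities(x)
            self.weights = (1 - self.lr) * self.weights + self.lr * r
            lr_k = (self.lr * r)[:, None]
            self.means += lr_k * (x - self.means)
            self.vars += lr_k * ((x - self.means) ** 2 - self.vars)
            self.vars = np.maximum(self.vars, 1e-4)  # keep variances well-behaved

# Two-component model of an object's colors, adapted as the appearance drifts.
gmm = AdaptiveGaussianMixture(means=[[0.8, 0.2, 0.2], [0.6, 0.3, 0.3]],
                              variances=np.full((2, 3), 0.02),
                              weights=[0.5, 0.5])
frame_pixels = np.clip(np.random.default_rng(2).normal([0.7, 0.2, 0.2], 0.05, (200, 3)), 0, 1)
gmm.update(frame_pixels)
print(np.round(gmm.means, 3))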
Allili, M. S.; Ziou, D.
Object of interest segmentation and tracking by using feature selection and active contours Proceedings article
In: Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition, Minneapolis, MN, 2007, ISBN: 1-4244-1180-7; 978-1-4244-1180-1, (ISSN: 1063-6919).
@inproceedings{allili_object_2007,
title = {Object of interest segmentation and tracking by using feature selection and active contours},
author = {M. S. Allili and D. Ziou},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-34948855864&doi=10.1109%2fCVPR.2007.383449&partnerID=40&md5=2429a266190c72bb8fb8d3776c444906},
doi = {10.1109/CVPR.2007.383449},
isbn = {1-4244-1180-7; 978-1-4244-1180-1},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
address = {Minneapolis, MN},
abstract = {Most image segmentation algorithms in the past are based on optimizing an objective function that aims to achieve the similarity between several low-level features to build a partition of the image into homogeneous regions. In the present paper, we propose to incorporate the relevance (selection) of the grouping features to enforce the segmentation toward the capturing of objects of interest. The relevance of the features is determined through a set of positive and negative examples of a specific object defined a priori by the user. The calculation of the relevance of the features is performed by maximizing an objective function defined on the mixture likelihoods of the positive and negative object examples sets. The incorporation of the features relevance in the object segmentation is formulated through an energy functional which is minimized by using level set active contours. We show the efficiency of the approach on several examples of object of interest segmentation and tracking where the features relevance is used. © 2007 IEEE.},
note = {ISSN: 1063-6919},
keywords = {Feature extraction, Image acquisition, Image segmentation, Object recognition, Object segmentation, Objective functions, Optimization},
pubstate = {published},
tppubtype = {inproceedings}
}
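Allili and Ziou (2007) weight the grouping features by their relevance, estimated from mixture likelihoods of user-supplied positive and negative object examples. A simplified reading of that idea, scoring each feature by an average log-likelihood ratio under per-feature Gaussian mixtures fitted with scikit-learn (synthetic examples, not the objective maximized in the paper), is:

import numpy as np
from sklearn.mixture import GaussianMixture

def feature_relevance(pos, neg, n_components=2):
    """Score each feature by how much better positive examples fit the positive model."""
    scores = []
    for j in range(pos.shape[1]):
        gm_pos = GaussianMixture(n_components, random_state=0).fit(pos[:, [j]])
        gm_neg = GaussianMixture(n_components, random_state=0).fit(neg[:, [j]])
        # Average log-likelihood ratio of positive samples under the two models.
        scores.append(np.mean(gm_pos.score_samples(pos[:, [j]]) -
                              gm_neg.score_samples(pos[:, [j]])))
    w = np.clip(scores, 0, None)
    return w / w.sum() if w.sum() > 0 else np.full(pos.shape[1], 1.0 / pos.shape[1])

# Synthetic examples: feature 0 is discriminative, feature 1 is pure noise.
rng = np.random.default_rng(3)
pos = np.column_stack([rng.normal(1.0, 0.2, 300), rng.normal(0.0, 1.0, 300)])
neg = np.column_stack([rng.normal(-1.0, 0.2, 300), rng.normal(0.0, 1.0, 300)])
print(np.round(feature_relevance(pos, neg), 3))   # feature 0 should dominate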
Allili, M. S.; Ziou, D.
A robust video object tracking by using active contours Proceedings article
In: 2006 Conference on Computer Vision and Pattern Recognition Workshops, p. 135, IEEE Computer Society, New York, NY, 2006, ISBN: 0769526462; 978-076952646-1, (Journal Abbreviation: Conf. Comput. Vision Pattern Recog. Workshops).
@inproceedings{allili_robust_2006,
title = {A robust video object tracking by using active contours},
author = {M. S. Allili and D. Ziou},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33845513941&doi=10.1109%2fCVPRW.2006.20&partnerID=40&md5=64ff2be5c45a6c206420bf6eb5589bca},
doi = {10.1109/CVPRW.2006.20},
isbn = {0769526462; 978-076952646-1},
year = {2006},
date = {2006-01-01},
booktitle = {2006 Conference on Computer Vision and Pattern Recognition Workshops},
volume = {2006},
pages = {135},
publisher = {IEEE Computer Society},
address = {New York, NY},
abstract = {In this paper, we propose a novel object tracking algorithm in video sequences. The formulation of our tracking model is based on variational calculus, where region and boundary information cooperate for object boundary localization by using active contours. In the approach, only the segmentation of the objects in the first frame is required for initialization. The evolution of the object contours on a current frame aims to find the boundary of the objects by minimizing the Kullback-Leibler distance of the region features' distribution in the vicinity of the contour to the objects versus the background respectively. We show the effectiveness of the approach on examples of object tracking performed on real video sequences. © 2006 IEEE.},
note = {Journal Abbreviation: Conf. Comput. Vision Pattern Recog. Workshops},
keywords = {Boundary, Boundary localization, Color, Feature distribution, Image processing, Image segmentation, Kullback-Leibler distance, Level sets, Mathematical models, Mixture of pdfs, Object recognition, Object Tracking, Texture, Tracking (position), Variational techniques, Video object tracking},
pubstate = {published},
tppubtype = {inproceedings}
}
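The tracking criterion in Allili and Ziou (2006) moves the contour by comparing the feature distribution near the contour with the object and background models through a Kullback-Leibler distance. The bare-bones sketch below only illustrates that histogram comparison on synthetic 1-D intensities; the variational contour evolution itself is not shown.

import numpy as np

def kl_divergence(p, q, eps=1e-10):
    """KL(p || q) between two normalized histograms."""
    p = p / p.sum()
    q = q / q.sum()
    return float(np.sum(p * np.log((p + eps) / (q + eps))))

def histogram(samples, bins=32):
    h, _ = np.histogram(samples, bins=bins, range=(0.0, 1.0))
    return h.astype(float)

# Reference intensity models learned from the first frame (synthetic here).
rng = np.random.default_rng(4)
object_model = histogram(rng.normal(0.7, 0.05, 5000))
background_model = histogram(rng.normal(0.3, 0.05, 5000))

# Pixels in a narrow band just inside the current contour position.
band_inside = histogram(rng.normal(0.68, 0.06, 400))

# The contour would be driven so that the inside band matches the object model
# better than the background model (smaller KL distance).
print("KL to object:    ", round(kl_divergence(band_inside, object_model), 3))
print("KL to background:", round(kl_divergence(band_inside, background_model), 3))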