

de Recherche et d'Innovation
en Cybersécurité et Société
Valem, L. P.; Pedronette, D. C. G.; Allili, M. S.
Contrastive Loss Based on Contextual Similarity for Image Classification Article d'actes
Dans: G., Bebis; V., Patel; J., Gu; J., Panetta; Y., Gingold; K., Johnsen; M.S., Arefin; S., Dutta; A., Biswas (Ed.): Lect. Notes Comput. Sci., p. 58–69, Springer Science and Business Media Deutschland GmbH, 2025, ISBN: 03029743 (ISSN); 978-303177391-4 (ISBN), (Journal Abbreviation: Lect. Notes Comput. Sci.).
Résumé | Liens | BibTeX | Étiquettes: Adversarial machine learning, Classification accuracy, Contrastive Learning, Cross entropy, Experimental evaluation, Federated learning, Image classification, Image comparison, Image embedding, Images classification, Model generalization, Model robustness, Neighborhood information, Self-supervised learning, Similarity measure
@inproceedings{valem_contrastive_2025,
  title     = {Contrastive Loss Based on Contextual Similarity for Image Classification},
  author    = {Valem, L. P. and Pedronette, D. C. G. and Allili, M. S.},
  editor    = {Bebis, G. and Patel, V. and Gu, J. and Panetta, J. and Gingold, Y. and Johnsen, K. and Arefin, M. S. and Dutta, S. and Biswas, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85218461565&doi=10.1007%2f978-3-031-77392-1_5&partnerID=40&md5=cf885303646c3b1a4f4eacb87d02a2b6},
  doi       = {10.1007/978-3-031-77392-1_5},
  isbn      = {978-3-031-77391-4},
  issn      = {0302-9743},
  year      = {2025},
  date      = {2025-01-01},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {15046},
  pages     = {58--69},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {Contrastive learning has been extensively exploited in self-supervised and supervised learning due to its effectiveness in learning representations that distinguish between similar and dissimilar images. It offers a robust alternative to cross-entropy by yielding more semantically meaningful image embeddings. However, most contrastive losses rely on pairwise measures to assess the similarity between elements, ignoring more general neighborhood information that can be leveraged to enhance model robustness and generalization. In this paper, we propose the Contextual Contrastive Loss (CCL) to replace pairwise image comparison by introducing a new contextual similarity measure using neighboring elements. The CCL yields a more semantically meaningful image embedding ensuring better separability of classes in the latent space. Experimental evaluation on three datasets (Food101, MiniImageNet, and CIFAR-100) has shown that CCL yields superior results by achieving up to 10.76% relative gains in classification accuracy, particularly for fewer training epochs and limited training data. This demonstrates the potential of our approach, especially in resource-constrained scenarios. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
  note      = {Journal Abbreviation: Lect. Notes Comput. Sci.},
  keywords  = {Adversarial machine learning, Classification accuracy, Contrastive Learning, Cross entropy, Experimental evaluation, Federated learning, Image classification, Image comparison, Image embedding, Images classification, Model generalization, Model robustness, Neighborhood information, Self-supervised learning, Similarity measure},
  pubstate  = {published},
  tppubtype = {inproceedings}
}