

de Recherche et d’Innovation
en Cybersécurité et Société
Damadi, M. S.; Davoust, A.
Fairness in social machines: a systematic review Article de journal
Dans: Journal of Information, Communication and Ethics in Society, p. 1–40, 2026, ISSN: 1477-996X.
Résumé | Liens | BibTeX | Étiquettes: Cultural bias, Discrimination, Fairness, Gender bias, Geographic bias, Social machines
@article{damadi_fairness_2026,
  title     = {Fairness in social machines: a systematic review},
  author    = {Damadi, M. S. and Davoust, A.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-105030531101&doi=10.1108%2FJICES-01-2025-0002&partnerID=40&md5=63e2f87ee3852ffe2d49c514e38cba1c},
  doi       = {10.1108/JICES-01-2025-0002},
  issn      = {1477-996X},
  year      = {2026},
  date      = {2026-01-01},
  journal   = {Journal of Information, Communication and Ethics in Society},
  pages     = {1--40},
  abstract  = {Purpose – The purpose of the paper is to provide a systematic review of biases in social machines to better understand the general problem of fairness in these systems. It aims to identify and categorize phenomena described as biases toward specific demographic groups, frame them normatively as harmful and relate them to established fairness concepts originally defined for algorithmic systems. Design/methodology/approach – The phenomenon of algorithmic bias refers to systematic biases against identifiable demographic groups that occur in automated decisions systems. Such biases have mostly been studied in the context of black-box decision systems built using machine learning (ML). However, similar problems have also been reported in complex socio-technical systems such as Wikipedia and Airbnb, known more generally as social machines, where the observed biases cannot necessarily be attributed to specific automated decision systems. Instead, the biases may emerge as a result of complex processes involving numerous users and a computational infrastructure. To gain a better understanding of fairness in social machines, the authors select a representative sample of social machines from six distinct categories, and systematically review the literature reporting biases in these systems, covering 196 papers. The authors classify the reported bias phenomena, identify the affected demographic groups and relate the phenomena to established notions of harm from algorithmic fairness research. Finally, the authors identify the normative expectations of fairness associated with the different problems and discuss the applicability of existing criteria proposed for ML-driven decision systems. The analysis highlights the conceptual similarity of bias phenomena between algorithmic systems and social machines, allowing for a shared vocabulary to describe and compare phenomena across a broad class of systems. 
Findings – The paper identifies two key biases in social machines: representational harm, from underrepresentation or biased portrayal of disadvantaged groups, and allocative harm, from unfair decision processes, measurable via metrics like demographic parity. Gender bias is prevalent and easier to detect due to explicit markers, offering insights for identifying other biases. Unique biases arise from user categorizations, creating unintended discrimination linked to protected characteristics. These biases result from complex user interactions, not isolated algorithms. Addressing them requires redesigning social machines, focusing on computational infrastructure and interaction norms, such as visibility settings, to mitigate harmful outcomes. Originality/value – The paper’s originality lies in its systematic review of biases in social machines, offering a novel perspective on fairness in these systems. Unlike prior studies focusing solely on algorithmic fairness, this work examines the broader socio-technical interactions within social machines, identifying biases that emerge from user interactions and design choices. By linking these biases to established fairness concepts like demographic parity and representational harm, the paper bridges the gap between algorithmic fairness and social dynamics. © 2025 Emerald Publishing Limited},
  keywords  = {Cultural bias, Discrimination, Fairness, Gender bias, Geographic bias, Social machines},
  pubstate  = {published},
  tppubtype = {article}
}
Damadi, M. S.; Davoust, A.
Fairness in Socio-Technical Systems: A Case Study of Wikipedia Article de journal
Dans: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 14199 LNCS, p. 84–100, 2023, ISSN: 0302-9743, (ISBN: 9783031421402).
Résumé | Liens | BibTeX | Étiquettes: Algorithmics, Bias, Case-studies, Causal relationships, Cultural bias, Fairness, Gender bias, Machine learning, Machine-learning, Parallel processing systems, Sociotechnical systems, Wikipedia
@article{damadi_fairness_2023,
  title     = {Fairness in Socio-Technical Systems: A Case Study of {Wikipedia}},
  author    = {Damadi, M. S. and Davoust, A.},
  editor    = {Alvarez, C. and Marutschke, D. M. and Takada, H.},
  url       = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85172720004&doi=10.1007%2f978-3-031-42141-9_6&partnerID=40&md5=172c8c6ae5b09536efdf983e9be965e7},
  doi       = {10.1007/978-3-031-42141-9_6},
  issn      = {0302-9743},
  isbn      = {9783031421402},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  volume    = {14199 LNCS},
  pages     = {84--100},
  publisher = {Springer Science and Business Media Deutschland GmbH},
  abstract  = {Wikipedia content is produced by a complex socio-technical systems (STS), and exhibits numerous biases, such as gender and cultural biases. We investigate how these biases relate to the concepts of algorithmic bias and fairness defined in the context of algorithmic systems. We systematically review 75 papers describing different types of bias in Wikipedia, which we classify and relate to established notions of harm and normative expectations of fairness as defined for machine learning-driven algorithmic systems. In addition, by analysing causal relationships between the observed phenomena, we demonstrate the complexity of the socio-technical processes causing harm. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2023.},
  keywords  = {Algorithmics, Bias, Case-studies, Causal relationships, Cultural bias, Fairness, Gender bias, Machine learning, Machine-learning, Parallel processing systems, Sociotechnical systems, Wikipedia},
  pubstate  = {published},
  tppubtype = {article}
}



