

Bacha, S.; Allili, M. S.; Kerbedj, T.; Chahboub, R.
Investigating food pairing hypothesis based on deep learning: Case of Algerian cuisine Journal Article
In: International Journal of Gastronomy and Food Science, vol. 39, 2025, ISSN: 1878-450X, (Publisher: AZTI-Tecnalia).
@article{bacha_investigating_2025,
title = {Investigating food pairing hypothesis based on deep learning: Case of Algerian cuisine},
author = {S. Bacha and M. S. Allili and T. Kerbedj and R. Chahboub},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85214793354&doi=10.1016%2fj.ijgfs.2024.101098&partnerID=40&md5=b2548861f182c4ea1820fceec7003f82},
doi = {10.1016/j.ijgfs.2024.101098},
issn = {1878-450X},
year = {2025},
date = {2025-01-01},
journal = {International Journal of Gastronomy and Food Science},
volume = {39},
abstract = {Traditional cuisine is considered a core element of cultural identity. The choice of food can often be influenced by identity, culture, and geography. This work investigates traditional Algerian cuisine by exploring the food pairing hypothesis, which stipulates that ingredients sharing common flavor compounds taste better when combined than those that do not. To gain insight into the ingredient compounds found in this cuisine, we analyze their characteristics using spectral clustering. Then, we propose a model based on LSTMs to test the food pairing hypothesis in Algerian cuisine on a collected corpus. Our research shows that Algerian cuisine has a negative food pairing tendency, which is consistent with South European cuisine, suggesting broader regional culinary patterns. To the best of our knowledge, this is the first study to investigate the FPH in Algerian cuisine, contributing to a deeper understanding of the food pairing tendencies specific to this region and offering a comparative perspective with neighboring Mediterranean cuisines. © 2025 Elsevier B.V.},
note = {Publisher: AZTI-Tecnalia},
keywords = {Algerian cuisine, Computational gastronomy, Deep learning, Food pairing hypothesis (FPH), Spectral clustering},
pubstate = {published},
tppubtype = {article}
}
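The food-pairing analysis in this entry can be made concrete with the compound-sharing score commonly used in FPH studies: the mean number of flavor compounds shared by each pair of ingredients in a recipe. Below is a minimal Python sketch of that score; the ingredient and compound data are purely illustrative, and the paper's LSTM model and Algerian recipe corpus are not reproduced here.

from itertools import combinations

# Hypothetical flavor-compound sets per ingredient (illustrative only).
compounds = {
    "mint":     {"menthol", "limonene", "carvone"},
    "lamb":     {"skatole", "limonene"},
    "chickpea": {"hexanal", "linalool"},
}

def mean_shared_compounds(recipe):
    """Classic food-pairing score: the mean number of flavor compounds
    shared by ingredient pairs in a recipe. Higher values indicate a
    positive food-pairing tendency."""
    pairs = list(combinations(recipe, 2))
    if not pairs:
        return 0.0
    return sum(len(compounds[a] & compounds[b]) for a, b in pairs) / len(pairs)

print(mean_shared_compounds(["mint", "lamb", "chickpea"]))  # 0.333...

A cuisine with a negative pairing tendency, as reported above for Algerian cuisine, scores its recipes lower on this metric than random ingredient combinations would.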
Bouafia, Y.; Allili, M. S.; Hebbache, L.; Guezouli, L.
SES-ReNet: Lightweight deep learning model for human detection in hazy weather conditions Journal Article
In: Signal Processing: Image Communication, vol. 130, 2025, ISSN: 0923-5965, (Publisher: Elsevier B.V.).
@article{bouafia_ses-renet_2025,
title = {SES-ReNet: Lightweight deep learning model for human detection in hazy weather conditions},
author = {Y. Bouafia and M. S. Allili and L. Hebbache and L. Guezouli},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85208562795&doi=10.1016%2fj.image.2024.117223&partnerID=40&md5=322b79f8d78045395efcabcbf86c6e1c},
doi = {10.1016/j.image.2024.117223},
issn = {0923-5965},
year = {2025},
date = {2025-01-01},
journal = {Signal Processing: Image Communication},
volume = {130},
abstract = {Accurate detection of people in outdoor scenes plays an essential role in improving personal safety and security. However, existing human detection algorithms face significant challenges when visibility is reduced and human appearance is degraded, particularly in hazy weather conditions. To address this problem, we present a novel lightweight model based on the RetinaNet detection architecture. The model incorporates a lightweight backbone feature extractor, a dehazing functionality based on knowledge distillation (KD), and a multi-scale attention mechanism based on the Squeeze and Excitation (SE) principle. KD is achieved from a larger network trained on unhazed clear images, whereas attention is incorporated at low-level and high-level features of the network. Experimental results have shown remarkable performance, outperforming state-of-the-art methods while running at 22 FPS. The combination of high accuracy and real-time capabilities makes our approach a promising solution for effective human detection in challenging weather conditions and suitable for real-time applications. © 2024},
note = {Publisher: Elsevier B.V.},
keywords = {Condition, Deep learning, face recognition, Hazy weather, Human detection, Knowledge distillation, Learning models, Lightweight Retinanet, Outdoor scenes, Personal safety, Personal security, Safety and securities},
pubstate = {published},
tppubtype = {article}
}
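The Squeeze-and-Excitation (SE) principle named in this entry gates each feature channel by a weight computed from globally pooled context. The following is a minimal PyTorch sketch of a standard SE block (Hu et al.); the reduction ratio and its placement inside SES-ReNet are assumptions, not the paper's exact design.

import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Standard Squeeze-and-Excitation channel attention block."""
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)  # squeeze: global spatial context
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),  # per-channel gates in (0, 1)
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        w = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * w  # excite: reweight channels

x = torch.randn(2, 64, 32, 32)
print(SEBlock(64)(x).shape)  # torch.Size([2, 64, 32, 32])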
Lapointe, J. -F.; Allili, M. S.; Hammouche, N.
Field Trials of an AI-AR-Based System for Remote Bridge Inspection by Drone Proceedings Article
In: D. Harris; W.-C. Li; H. Krömker (Eds.): Lect. Notes Comput. Sci., pp. 278–287, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-3-031-76823-1, (Journal Abbreviation: Lect. Notes Comput. Sci.).
@inproceedings{lapointe_field_2025,
title = {Field Trials of an AI-AR-Based System for Remote Bridge Inspection by Drone},
author = {J. -F. Lapointe and M. S. Allili and N. Hammouche},
editor = {Harris D. and Li W.-C. and Krömker H.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85213387549&doi=10.1007%2f978-3-031-76824-8_20&partnerID=40&md5=565ae5dded9cfdf27632e79e702c7718},
doi = {10.1007/978-3-031-76824-8_20},
issn = {0302-9743},
isbn = {978-3-031-76823-1},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15381 LNCS},
pages = {278–287},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Bridge inspections are important to ensure the safety of the users of these critical transportation infrastructures and to avoid tragedies that could be caused by their collapse. This paper describes the results of field trials of an advanced system for remotely guided bridge inspection by drone, which relies on artificial intelligence and augmented reality. Results indicate that a high-speed network link is critical to achieving good performance. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
note = {Journal Abbreviation: Lect. Notes Comput. Sci.},
keywords = {Advanced systems, Air navigation, Artificial intelligence, artificial intelligence (AI), augmented reality, augmented reality (AR), Bridge inspection, Concrete bridges, Drone, Drones, Field trial, HIgh speed networks, High-speed Networks, Network links, Performance, Remote guidance, Transportation infrastructures, UAV},
pubstate = {published},
tppubtype = {inproceedings}
}
Valem, L. P.; Pedronette, D. C. G.; Allili, M. S.
Contrastive Loss Based on Contextual Similarity for Image Classification Proceedings Article
In: G. Bebis; V. Patel; J. Gu; J. Panetta; Y. Gingold; K. Johnsen; M. S. Arefin; S. Dutta; A. Biswas (Eds.): Lect. Notes Comput. Sci., pp. 58–69, Springer Science and Business Media Deutschland GmbH, 2025, ISSN: 0302-9743; ISBN: 978-3-031-77391-4, (Journal Abbreviation: Lect. Notes Comput. Sci.).
@inproceedings{valem_contrastive_2025,
title = {Contrastive Loss Based on Contextual Similarity for Image Classification},
author = {L. P. Valem and D. C. G. Pedronette and M. S. Allili},
editor = {Bebis G. and Patel V. and Gu J. and Panetta J. and Gingold Y. and Johnsen K. and Arefin M.S. and Dutta S. and Biswas A.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85218461565&doi=10.1007%2f978-3-031-77392-1_5&partnerID=40&md5=cf885303646c3b1a4f4eacb87d02a2b6},
doi = {10.1007/978-3-031-77392-1_5},
issn = {0302-9743},
isbn = {978-3-031-77391-4},
year = {2025},
date = {2025-01-01},
booktitle = {Lect. Notes Comput. Sci.},
volume = {15046 LNCS},
pages = {58–69},
publisher = {Springer Science and Business Media Deutschland GmbH},
abstract = {Contrastive learning has been extensively exploited in self-supervised and supervised learning due to its effectiveness in learning representations that distinguish between similar and dissimilar images. It offers a robust alternative to cross-entropy by yielding more semantically meaningful image embeddings. However, most contrastive losses rely on pairwise measures to assess the similarity between elements, ignoring more general neighborhood information that can be leveraged to enhance model robustness and generalization. In this paper, we propose the Contextual Contrastive Loss (CCL) to replace pairwise image comparison by introducing a new contextual similarity measure using neighboring elements. The CCL yields a more semantically meaningful image embedding ensuring better separability of classes in the latent space. Experimental evaluation on three datasets (Food101, MiniImageNet, and CIFAR-100) has shown that CCL yields superior results by achieving up to 10.76% relative gains in classification accuracy, particularly for fewer training epochs and limited training data. This demonstrates the potential of our approach, especially in resource-constrained scenarios. © The Author(s), under exclusive license to Springer Nature Switzerland AG 2025.},
note = {Journal Abbreviation: Lect. Notes Comput. Sci.},
keywords = {Adversarial machine learning, Classification accuracy, Contrastive Learning, Cross entropy, Experimental evaluation, Federated learning, Image classification, Image comparison, Image embedding, Images classification, Model generalization, Model robustness, Neighborhood information, Self-supervised learning, Similarity measure},
pubstate = {published},
tppubtype = {inproceedings}
}
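A common way to realize the neighborhood idea behind a contextual similarity is to compare the k-nearest-neighbor sets of two embeddings rather than their raw pairwise similarity. The sketch below uses the Jaccard overlap of kNN sets; it illustrates the general principle only and is not necessarily the paper's exact CCL formulation.

import torch
import torch.nn.functional as F

def contextual_similarity(emb, k=5):
    """Contextual similarity as Jaccard overlap of k-nearest-neighbor
    sets: two embeddings count as similar when they share neighbors,
    not merely when their cosine similarity is high."""
    emb = F.normalize(emb, dim=1)
    sims = emb @ emb.t()                          # cosine similarity matrix
    knn = sims.topk(k + 1, dim=1).indices[:, 1:]  # drop the self-match
    n = emb.size(0)
    ctx = torch.zeros(n, n)
    for i in range(n):
        for j in range(n):
            a, b = set(knn[i].tolist()), set(knn[j].tolist())
            ctx[i, j] = len(a & b) / len(a | b)   # Jaccard of neighbor sets
    return ctx

print(contextual_similarity(torch.randn(8, 16), k=3).shape)  # torch.Size([8, 8])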
Allaoui, M. L.; Allili, M. S.
MEDiXNet: A Robust Mixture of Expert Dermatological Imaging Networks for Skin Lesion Segmentation Proceedings Article
In: IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recogn., IEEE Computer Society, 2024, ISSN: 1945-7928; ISBN: 979-8-3503-1333-8, (Journal Abbreviation: IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recogn.).
@inproceedings{allaoui_medixnet_2024,
title = {MEDiXNet: A Robust Mixture of Expert Dermatological Imaging Networks for Skin Lesion Segmentation},
author = {M. L. Allaoui and M. S. Allili},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85203397643&doi=10.1109%2fISBI56570.2024.10635430&partnerID=40&md5=c95dd2122f03c944e945b684a111e741},
doi = {10.1109/ISBI56570.2024.10635430},
issn = {1945-7928},
isbn = {979-8-3503-1333-8},
year = {2024},
date = {2024-01-01},
booktitle = {IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recogn.},
publisher = {IEEE Computer Society},
abstract = {Skin lesion segmentation in dermatological imaging is crucial for early skin cancer detection. However, it is challenging due to variation in lesion appearance, blurred boundaries, and the presence of artifacts. Existing segmentation methods often fall short in accurately addressing these issues. We present MEDiXNet, a novel deep learning model combining expert networks with the Adaptive Salient Region Attention Module (ASRAM) to specifically tackle these challenges. Tailored for varying lesion types, MEDiXNet leverages ASRAM to enhance focus on critical regions, substantially improving segmentation accuracy. Tested on the ISIC datasets, it achieved a 94% Dice coefficient, surpassing state-of-the-art methods. MEDiXNet's innovative approach represents a significant advancement in dermatological imaging, promising to elevate the precision of skin cancer diagnostics. © 2024 IEEE.},
note = {Journal Abbreviation: IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recogn.},
keywords = {Attention mechanism, Attention mechanisms, Blurred boundaries, Cancer detection, Deep learning, Dermatology, Expert systems, Image segmentation, Lesion segmentations, Mixture of experts, Mixture of experts model, Mixture-of-experts model, Salient regions, Skin cancers, Skin lesion, Skin lesion segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
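A mixture-of-experts model combines several specialized networks through a learned gating function that weights each expert's prediction. Below is a deliberately minimal PyTorch sketch of MoE gating for binary segmentation; the expert architectures and the ASRAM attention module of MEDiXNet are not reproduced, and every layer choice here is an assumption.

import torch
import torch.nn as nn

class MoESeg(nn.Module):
    """Minimal mixture-of-experts segmentation head: a gating network
    produces per-expert weights and the output mask is their weighted sum."""
    def __init__(self, in_ch=3, n_experts=3):
        super().__init__()
        self.experts = nn.ModuleList(
            nn.Conv2d(in_ch, 1, kernel_size=3, padding=1) for _ in range(n_experts)
        )
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(in_ch, n_experts), nn.Softmax(dim=1),
        )

    def forward(self, x):
        w = self.gate(x)                                          # (B, E) weights
        masks = torch.stack([e(x) for e in self.experts], dim=1)  # (B, E, 1, H, W)
        return (w.view(*w.shape, 1, 1, 1) * masks).sum(dim=1)     # (B, 1, H, W)

print(MoESeg()(torch.randn(2, 3, 64, 64)).shape)  # torch.Size([2, 1, 64, 64])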
Amirkhani, D.; Allili, M. S.; Hebbache, L.; Hammouche, N.; Lapointe, J.
Visual Concrete Bridge Defect Classification and Detection Using Deep Learning: A Systematic Review Journal Article
In: IEEE Transactions on Intelligent Transportation Systems, pp. 1–23, 2024, ISSN: 1524-9050, (Publisher: Institute of Electrical and Electronics Engineers Inc.).
@article{amirkhani_visual_2024,
title = {Visual Concrete Bridge Defect Classification and Detection Using Deep Learning: A Systematic Review},
author = {D. Amirkhani and M. S. Allili and L. Hebbache and N. Hammouche and J. Lapointe},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85186994244&doi=10.1109%2fTITS.2024.3365296&partnerID=40&md5=a9228252d620ad6d444cc395296ebac2},
doi = {10.1109/TITS.2024.3365296},
issn = {1524-9050},
year = {2024},
date = {2024-01-01},
journal = {IEEE Transactions on Intelligent Transportation Systems},
pages = {1–23},
abstract = {Visual inspection is an important process for maintaining bridges in road transportation systems and preventing catastrophic events and tragedies. In this process, accurate and automatic concrete defect classification and detection are major components to ensure early identification of any issue that can compromise the bridge's safety and integrity. While a tremendous body of research has addressed these problems over the last decades, the advent of deep learning has unleashed huge opportunities for building more accurate and efficient methods. Our aim in this survey is to study the recent progress of vision-based concrete bridge defect classification and detection in the deep learning era. Our review encompasses the major aspects underlying typical frameworks, which include concrete defect taxonomy, public datasets, and evaluation metrics. We also provide a taxonomy of deep-learning-based classification and detection algorithms, with a detailed discussion of their advantages and limitations. We further benchmark baseline models for classification and detection using two popular datasets. We finally discuss important challenges of concrete defect classification and detection, and promising research avenues to build better models and integrate them into real-world visual inspection systems, which warrant further scientific investigation. © IEEE},
note = {Publisher: Institute of Electrical and Electronics Engineers Inc.},
keywords = {Annotation, Annotations, Bridges, Classification, Concrete, Concrete bridge defect, Concrete bridge defects, Concrete bridges, Concrete defects, Concretes, Deep learning, Defect classification, Defect detection, Defects, Detection, Inspection, Reviews, Segmentation, Taxonomies, Visualization},
pubstate = {published},
tppubtype = {article}
}
Nouboukpo, A.; Allaoui, M. L.; Allili, M. S.
Multi-scale spatial consistency for deep semi-supervised skin lesion segmentation Journal Article
In: Engineering Applications of Artificial Intelligence, vol. 135, 2024, ISSN: 0952-1976, (Publisher: Elsevier Ltd).
@article{nouboukpo_multi-scale_2024,
title = {Multi-scale spatial consistency for deep semi-supervised skin lesion segmentation},
author = {A. Nouboukpo and M. L. Allaoui and M. S. Allili},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85195700182&doi=10.1016%2fj.engappai.2024.108681&partnerID=40&md5=e1cc2b6a1bb0aed530e8c04583c76167},
doi = {10.1016/j.engappai.2024.108681},
issn = {0952-1976},
year = {2024},
date = {2024-01-01},
journal = {Engineering Applications of Artificial Intelligence},
volume = {135},
abstract = {This paper introduces a novel semi-supervised framework, the Multiscale Spatial Consistency Network (MSCNet), for robust semi-supervised skin lesion segmentation. MSCNet uses local and global spatial consistency to leverage a minimal set of labeled data, supplemented by a large number of unlabeled data, to improve segmentation. The model is based on a single Encoder–Decoder (ED) network, augmented with a Spatially-Constrained Mixture Model (SCMM) to enforce spatial coherence in predictions. To encode local spatial consistency, a hierarchical superpixel structure is used to capture local region context (LRC), bolstering the model's capacity to discern fine-grained lesion details. Global consistency is enforced through the SCMM module, which uses a larger context for lesion/background discrimination. In addition, it enables efficient leveraging of the unlabeled data through pseudo-label generation. Experiments demonstrate that MSCNet outperforms existing state-of-the-art methods in segmenting complex lesions. MSCNet has an excellent generalization capability, offering a promising direction for semi-supervised medical image segmentation, particularly in scenarios with limited annotated data. The code is available at https://github.com/AdamaTG/MSCNet. © 2024 Elsevier Ltd},
note = {Publisher: Elsevier Ltd},
keywords = {Deep learning, Dermatology, Image segmentation, Lesion segmentations, Medical imaging, Multi-scales, Semi-supervised, Semi-supervised learning, Skin lesion, Skin lesion segmentation, Spatial consistency, Spatially constrained mixture model, Spatially-constrained mixture models, Supervised learning, UNets, Unlabeled data},
pubstate = {published},
tppubtype = {article}
}
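The pseudo-label generation mentioned in this abstract follows a generic semi-supervised recipe: compute a supervised loss on the labeled images, then enforce consistency between confident predictions and a perturbed view of the unlabeled images. The PyTorch sketch below shows that generic recipe only; MSCNet's superpixel hierarchy and SCMM module are not modeled, and the confidence threshold and noise perturbation are illustrative.

import torch
import torch.nn.functional as F

def semi_supervised_loss(model, x_lab, y_lab, x_unlab, tau=0.9):
    """Supervised loss on labeled pixels plus a consistency loss on
    confident pseudo-labels from unlabeled images."""
    sup = F.binary_cross_entropy_with_logits(model(x_lab), y_lab)

    with torch.no_grad():
        probs = torch.sigmoid(model(x_unlab))
        mask = (probs > tau) | (probs < 1 - tau)  # keep confident pixels only
        pseudo = (probs > 0.5).float()

    # Re-predict on a lightly perturbed view and match the pseudo-labels.
    logits = model(x_unlab + 0.05 * torch.randn_like(x_unlab))
    unsup = (F.binary_cross_entropy_with_logits(logits[mask], pseudo[mask])
             if mask.any() else logits.sum() * 0)
    return sup + unsup

model = torch.nn.Conv2d(1, 1, 3, padding=1)  # stand-in for an encoder-decoder
x_l = torch.randn(2, 1, 32, 32)
y_l = torch.randint(0, 2, (2, 1, 32, 32)).float()
print(semi_supervised_loss(model, x_l, y_l, torch.randn(2, 1, 32, 32)))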
Hebbache, L.; Amirkhani, D.; Allili, M. S.; Hammouche, N.; Lapointe, J. -F.
Leveraging Saliency in Single-Stage Multi-Label Concrete Defect Detection Using Unmanned Aerial Vehicle Imagery Journal Article
In: Remote Sensing, vol. 15, no. 5, 2023, ISSN: 2072-4292, (Publisher: MDPI).
@article{hebbache_leveraging_2023,
title = {Leveraging Saliency in Single-Stage Multi-Label Concrete Defect Detection Using Unmanned Aerial Vehicle Imagery},
author = {L. Hebbache and D. Amirkhani and M. S. Allili and N. Hammouche and J. -F. Lapointe},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85149966766&doi=10.3390%2frs15051218&partnerID=40&md5=7bf1cb3353270c696c07ff24dc24655d},
doi = {10.3390/rs15051218},
issn = {2072-4292},
year = {2023},
date = {2023-01-01},
journal = {Remote Sensing},
volume = {15},
number = {5},
abstract = {Visual inspection of concrete structures using Unmanned Aerial Vehicle (UAV) imagery is a challenging task due to the variability of defects' size and appearance. This paper proposes a high-performance model for automatic and fast detection of bridge concrete defects using UAV-acquired images. Our method, coined the Saliency-based Multi-label Defect Detector (SMDD-Net), combines pyramidal feature extraction and attention through a one-stage concrete defect detection model. The attention module extracts local and global saliency features, which are scaled and integrated with the pyramidal feature extraction module of the network using max-pooling, multiplication, and residual skip connection operations. This has the effect of enhancing the localisation of small and low-contrast defects, as well as the overall accuracy of detection at varying image acquisition ranges. Finally, a multi-label detection loss function is used to identify and localise overlapping defects. The experimental results on a standard dataset and real-world images demonstrated the performance of SMDD-Net with regard to state-of-the-art techniques. The accuracy and computational efficiency of SMDD-Net make it a suitable method for UAV-based bridge structure inspection. © 2023 by the authors.},
note = {Publisher: MDPI},
keywords = {Aerial vehicle, Aircraft detection, Antennas, Computational efficiency, Concrete defects, Deep learning, Defect detection, extraction, Feature extraction, Features extraction, Image acquisition, Image Enhancement, Multi-labels, One-stage concrete defect detection, Saliency, Single stage, Unmanned aerial vehicles (UAV), Unmanned areal vehicle imagery},
pubstate = {published},
tppubtype = {article}
}
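The abstract names the fusion operations explicitly (max-pooling, multiplication, and residual skip connections), which suggests a saliency-gating pattern like the one below. This PyTorch sketch assumes a single-channel saliency map and one pyramid level; the 1x1 projection and sigmoid gating are assumptions, not SMDD-Net's published design.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SaliencyFusion(nn.Module):
    """Gates a pyramid feature map with a saliency map: max-pool the
    saliency to the feature resolution, multiply, and add a residual skip."""
    def __init__(self, channels):
        super().__init__()
        self.proj = nn.Conv2d(1, channels, kernel_size=1)  # lift saliency to C channels

    def forward(self, feat, saliency):
        # saliency: (B, 1, H, W) at image scale; feat: (B, C, h, w)
        s = F.adaptive_max_pool2d(saliency, feat.shape[-2:])
        return feat * torch.sigmoid(self.proj(s)) + feat  # multiply + residual skip

feat = torch.randn(2, 256, 32, 32)
sal = torch.rand(2, 1, 256, 256)
print(SaliencyFusion(256)(feat, sal).shape)  # torch.Size([2, 256, 32, 32])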
Yapi, D.; Nouboukpo, A.; Allili, M. S.
Mixture of multivariate generalized Gaussians for multi-band texture modeling and representation Journal Article
In: Signal Processing, vol. 209, 2023, ISSN: 0165-1684, (Publisher: Elsevier B.V.).
@article{yapi_mixture_2023,
title = {Mixture of multivariate generalized Gaussians for multi-band texture modeling and representation},
author = {D. Yapi and A. Nouboukpo and M. S. Allili},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85151300047&doi=10.1016%2fj.sigpro.2023.109011&partnerID=40&md5=3bf98e9667eb7b60cb3f59ed1dcb029c},
doi = {10.1016/j.sigpro.2023.109011},
issn = {0165-1684},
year = {2023},
date = {2023-01-01},
journal = {Signal Processing},
volume = {209},
abstract = {We present a unified statistical model for multivariate and multi-modal texture representation. This model is based on the formalism of finite mixtures of multivariate generalized Gaussians (MoMGG), which enables building a compact and accurate representation of texture images using multi-resolution texture transforms. The MoMGG model describes the joint statistics of subbands at different scales and orientations, as well as between adjacent locations within the same subband, providing a precise description of the texture layout. It can also combine different multi-scale transforms to build a richer and more representative texture signature for image similarity measurement. We tested our model on both traditional texture transforms (e.g., wavelets, contourlets, maximum response filter) and convolutional neural network (CNN) features (e.g., ResNet, SqueezeNet). Experiments on color-texture image retrieval have demonstrated the performance of our approach compared to state-of-the-art methods. © 2023},
note = {Publisher: Elsevier B.V.},
keywords = {Color texture retrieval, Content-based, Content-based color-texture retrieval, Convolution, convolutional neural network, Gaussians, Image retrieval, Image texture, Mixture of multivariate generalized gaussians, Multi-scale Decomposition, Subbands, Texture representation, Textures},
pubstate = {published},
tppubtype = {article}
}
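For reference, one standard parameterization of the model family this entry builds on: a K-component mixture of d-dimensional multivariate generalized Gaussians, where the shape parameter \beta controls tail behavior and \beta = 1 recovers the multivariate Gaussian. The paper's exact parameterization may differ.

p(\mathbf{x}) = \sum_{k=1}^{K} \pi_k \, g(\mathbf{x} \mid \boldsymbol{\mu}_k, \boldsymbol{\Sigma}_k, \beta_k), \qquad \sum_{k=1}^{K} \pi_k = 1,

g(\mathbf{x} \mid \boldsymbol{\mu}, \boldsymbol{\Sigma}, \beta) = \frac{\beta \, \Gamma(d/2)}{\pi^{d/2} \, \Gamma\!\left(\tfrac{d}{2\beta}\right) 2^{d/(2\beta)} \, |\boldsymbol{\Sigma}|^{1/2}} \exp\!\left(-\frac{1}{2}\left[(\mathbf{x}-\boldsymbol{\mu})^{\top} \boldsymbol{\Sigma}^{-1} (\mathbf{x}-\boldsymbol{\mu})\right]^{\beta}\right).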
Lapointe, J. -F.; Allili, M. S.; Belliveau, L.; Hebbache, L.; Amirkhani, D.; Sekkati, H.
AI-AR for Bridge Inspection by Drone Journal Article
In: Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 13318 LNCS, pp. 302–313, 2022, ISSN: 0302-9743, (ISBN: 978-3-031-06014-4; Publisher: Springer Science and Business Media Deutschland GmbH).
@article{lapointe_ai-ar_2022,
title = {AI-AR for Bridge Inspection by Drone},
author = {J. -F. Lapointe and M. S. Allili and L. Belliveau and L. Hebbache and D. Amirkhani and H. Sekkati},
editor = {Fragomeni G. and Chen J.Y.},
url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85131961739&doi=10.1007%2f978-3-031-06015-1_21&partnerID=40&md5=f57dfc1d9207b936684f18893eb5bfa7},
doi = {10.1007/978-3-031-06015-1_21},
issn = {0302-9743},
year = {2022},
date = {2022-01-01},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
volume = {13318 LNCS},
pages = {302–313},
abstract = {Good and regular inspections of transportation infrastructures such as bridges and overpasses are necessary to maintain the safety of the public who uses them and the integrity of the structures. Until recently, these inspections were done entirely manually, relying mainly on visual inspection to detect defects on the structure. In the last few years, inspection by drone has emerged as a way of achieving inspection that allows more efficient access to the structure. This paper describes a human-in-the-loop system that combines AI and AR for bridge inspection by drone. © 2022, Springer Nature Switzerland AG.},
note = {ISBN: 978-3-031-06014-4; Publisher: Springer Science and Business Media Deutschland GmbH},
keywords = {AR, augmented reality, Bridge inspection, Bridges, Deep learning, Drone, Drones, Human-in-the-loop, Inspection, Regular inspections, Remote guidance, RPAS, Transportation infrastructures, Visual inspection},
pubstate = {published},
tppubtype = {article}
}