
@article{cmes.2024.054820,
  author   = {Zhu, Yongsheng and Liu, Chong and Chen, Chunlei and Lyu, Xiaoting and Chen, Zheng and Wang, Bin and Hu, Fuqiang and Li, Hanxi and Dai, Jiao and Cai, Baigen and Wang, Wei},
  title    = {Privacy-Preserving Large-Scale {AI} Models for Intelligent Railway Transportation Systems: Hierarchical Poisoning Attacks and Defenses in Federated Learning},
  journal  = {Computer Modeling in Engineering \& Sciences},
  year     = {2024},
  volume   = {141},
  number   = {2},
  pages    = {1305--1325},
  issn     = {1526-1506},
  doi      = {10.32604/cmes.2024.054820},
  url      = {http://www.techscience.com/CMES/v141n2/58164},
  abstract = {The development of Intelligent Railway Transportation Systems necessitates incorporating privacy-preserving mechanisms into AI models to protect sensitive information and enhance system efficiency. Federated learning offers a promising solution by allowing multiple clients to train models collaboratively without sharing private data. However, despite its privacy benefits, federated learning systems are vulnerable to poisoning attacks, where adversaries alter local model parameters on compromised clients and send malicious updates to the server, potentially compromising the global model's accuracy. In this study, we introduce PMM (Perturbation coefficient Multiplied by Maximum value), a new poisoning attack method that perturbs model updates layer by layer, demonstrating the threat of poisoning attacks faced by federated learning. Extensive experiments across three distinct datasets have demonstrated PMM's ability to significantly reduce the global model's accuracy. Additionally, we propose an effective defense method, namely CLBL (Cluster Layer By Layer). Experiment results on three datasets have confirmed CLBL's effectiveness.},
}



