
@article{cmes.2023.030512,
  author   = {Jiao, Sanxiu and Cai, Lecai and Wang, Xinjie and Cheng, Kui and Gao, Xiang},
  title    = {A Differential Privacy Federated Learning Scheme Based on Adaptive {Gaussian} Noise},
  journal  = {Computer Modeling in Engineering \& Sciences},
  year     = {2024},
  volume   = {138},
  number   = {2},
  pages    = {1679--1694},
  issn     = {1526-1506},
  doi      = {10.32604/cmes.2023.030512},
  url      = {http://www.techscience.com/CMES/v138n2/54633},
  abstract = {As a distributed machine learning method, federated learning (FL) has the advantage of naturally protecting data
              privacy. It keeps data locally and trains local models through local data to protect the privacy of local data. The
              federated learning method effectively solves the problem of artificial Smart data islands and privacy protection
              issues. However, existing research shows that attackers may still steal user information by analyzing the parameters
              in the federated learning training process and the aggregation parameters on the server side. To solve this problem,
              differential privacy (DP) techniques are widely used for privacy protection in federated learning. However, adding
              Gaussian noise perturbations to the data degrades the model learning performance. To address these issues, this
              paper proposes a differential privacy federated learning scheme based on adaptive Gaussian noise (DPFL-AGN).
              To protect the data privacy and security of the federated learning training process, adaptive Gaussian noise is
              specifically added in the training process to hide the real parameters uploaded by the client. In addition, this paper
              proposes an adaptive noise reduction method. With the convergence of the model, the Gaussian noise in the later
              stage of the federated learning training process is reduced adaptively. This paper conducts a series of simulation
              experiments on real MNIST and CIFAR-10 datasets, and the results show that the DPFL-AGN algorithm performs
              better compared to the other algorithms.},
}



