
@article{jbd.2020.012294,
  author   = {Chen, Kai and Zhu, Haoqi and Yan, Leiming and Wang, Jinwei},
  title    = {A Survey on Adversarial Examples in Deep Learning},
  journal  = {Journal on Big Data},
  year     = {2020},
  volume   = {2},
  number   = {2},
  pages    = {71--84},
  issn     = {2579-0056},
  doi      = {10.32604/jbd.2020.012294},
  url      = {http://www.techscience.com/jbd/v2n2/40201},
  abstract = {Adversarial examples are hot topics in the field of security in deep
learning. The feature, generation methods, attack and defense methods of the
adversarial examples are focuses of the current research on adversarial examples.
This article explains the key technologies and theories of adversarial examples
from the concept of adversarial examples, the occurrences of the adversarial
examples, the attacking methods of adversarial examples. This article lists the
possible reasons for the adversarial examples. This article also analyzes several
typical generation methods of adversarial examples in detail: Limited-memory
BFGS (L-BFGS), Fast Gradient Sign Method (FGSM), Basic Iterative Method
(BIM), Iterative Least-likely Class Method (LLC), etc. Furthermore, in the
perspective of the attack methods and reasons of the adversarial examples, the
main defense techniques for the adversarial examples are listed: preprocessing,
regularization and adversarial training method, distillation method, etc., which
application scenarios and deficiencies of different defense measures are pointed
out. This article further discusses the application of adversarial examples which
currently is mainly used in adversarial evaluation and adversarial training.
Finally, the overall research direction of the adversarial examples is prospected
to completely solve the adversarial attack problem. There are still a lot of
practical and theoretical problems that need to be solved. Finding out the
characteristics of the adversarial examples, giving a mathematical description of
its practical application prospects, exploring the universal method of adversarial
example generation and the generation mechanism of the adversarial examples
are the main research directions of the adversarial examples in the future.},
}



