
@article{jai.2021.014175,
  author   = {Wang, Yuetian and Zhang, Chuanjing and Liao, Xuxin and Wang, Xingang and Gu, Zhaoquan},
  title    = {An Adversarial Attack System for Face Recognition},
  journal  = {Journal on Artificial Intelligence},
  volume   = {3},
  number   = {1},
  pages    = {1--8},
  year     = {2021},
  issn     = {2579-003X},
  doi      = {10.32604/jai.2021.014175},
  url      = {http://www.techscience.com/jai/v3n1/42096},
  abstract = {Deep neural networks (DNNs) are widely adopted in daily life and the security problems of DNNs have drawn attention from both scientific researchers and industrial engineers. Many related works show that DNNs are vulnerable to adversarial examples that are generated with subtle perturbation to original images in both digital domain and physical domain. As a most common application of DNNs, face recognition systems are likely to cause serious consequences if they are attacked by the adversarial examples. In this paper, we implement an adversarial attack system for face recognition in both digital domain that generates adversarial face images to fool the recognition system, and physical domain that generates customized glasses to fool the system when a person wears the glasses. Experiments show that our system attacks face recognition systems effectively. Furthermore, our system could misguide the recognition system to identify a person wearing the customized glasses as a certain target. We hope this research could help raise the attention of artificial intelligence security and promote building robust recognition systems.},
}



