
@article{cmc.2020.011834,
  author   = {Gu, Zhaoquan and Su, Yu and Liu, Chenwei and Lyu, Yinyu and Jian, Yunxiang and Li, Hao and Cao, Zhen and Wang, Le},
  title    = {Adversarial Attacks on License Plate Recognition Systems},
  journal  = {Computers, Materials \& Continua},
  year     = {2020},
  volume   = {65},
  number   = {2},
  pages    = {1437--1452},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2020.011834},
  url      = {http://www.techscience.com/cmc/v65n2/39886},
  abstract = {The license plate recognition system (LPRS) has been widely adopted in daily
life due to its efficiency and high accuracy. Deep neural networks are commonly used in
the LPRS to improve the recognition accuracy. However, researchers have found that
deep neural networks have their own security problems that may lead to unexpected
results. Specifically, they can be easily attacked by the adversarial examples that are
generated by adding small perturbations to the original images, resulting in incorrect
license plate recognition. There are some classic methods to generate adversarial
examples, but they cannot be adopted on LPRS directly. In this paper, we modify some
classic methods to generate adversarial examples that could mislead the LPRS. We
conduct extensive evaluations on the HyperLPR system and the results show that the
system could be easily attacked by such adversarial examples. In addition, we show that
the generated images could also attack the black-box systems; we show some examples
that the Baidu LPR system also makes incorrect recognitions. We hope this paper could
help improve the LPRS by realizing the existence of such adversarial attacks.},
}



