
@article{cmc.2026.076623,
  author   = {Huynh, Trong-Thua and Huynh, De-Thu and Phu, Du-Thang and Nguyen, Hong-Son and Nguyen, Quoc H.},
  title    = {{MobiIris}: Attention-Enhanced Lightweight Iris Recognition with Knowledge Distillation and Quantization},
  journal  = {Computers, Materials \& Continua},
  volume   = {87},
  number   = {3},
  pages    = {--},
  year     = {2026},
  issn     = {1546-2226},
  url      = {http://www.techscience.com/cmc/v87n3/66934},
  doi      = {10.32604/cmc.2026.076623},
  abstract = {This paper introduces MobiIris, a lightweight deep network for mobile iris recognition that enhances attention and specifically addresses the balance between accuracy and efficiency on devices with limited resources. The proposed model is based on the large version of MobileNetV3 and adds more spatial attention blocks and an embedding-based head that was trained using margin-based triplet learning, enabling fine-grained modeling of iris textures in a compact representation. To further improve discriminability, we design a training pipeline that combines dynamic-margin triplet loss, a staged hard/semi-hard negative mining strategy, and feature-level knowledge distillation from a ResNet-50 teacher. Finally, we investigate the use of post-training float16 quantization to reduce memory footprint and latency for deployment on mobile hardware. Experiments on the challenging CASIA-IrisV4-Thousand dataset show that the full-precision MobiIris model requires only 12 MB of storage and 27 ms inference latency, while achieving an EER of 1.409\%, VR@FAR = 1\% of 98.184\%, and CMC@1 of 94.785\%, closely matching a ResNet-50 baseline that is more than 7$\times$ larger and slower. Under post-training quantization, the model shrinks to 5.94 MB with 13 ms latency and maintains a competitive balance between accuracy and efficiency compared to other optimized variants. These results demonstrate that a coherent combination of lightweight architecture design, attention mechanisms, metric-learning objectives, hard negative mining, and knowledge distillation yields a practical iris recognition solution suitable for secure, real-time authentication on mobile and embedded platforms.},
}



