
@article{cmc.2024.049645,
  author   = {Ul Haq, Mahmood and Sethi, Muhammad Athar Javed and Ben Aoun, Najib and Alluhaidan, Ala Saleh and Ahmad, Sadique and Farid, Zahid},
  title    = {{CapsNet-FR}: Capsule Networks for Improved Recognition of Facial Features},
  journal  = {Computers, Materials \& Continua},
  volume   = {79},
  year     = {2024},
  number   = {2},
  pages    = {2169--2186},
  url      = {http://www.techscience.com/cmc/v79n2/56447},
  issn     = {1546-2226},
  abstract = {Face recognition (FR) technology has numerous applications in artificial intelligence including biometrics, security, authentication, law enforcement, and surveillance. Deep learning (DL) models, notably convolutional neural networks (CNNs), have shown promising results in the field of FR. However CNNs are easily fooled since they do not encode position and orientation correlations between features. Hinton et al. envisioned Capsule Networks as a more robust design capable of retaining pose information and spatial correlations to recognize objects more like the brain does. Lower-level capsules hold 8-dimensional vectors of attributes like position, hue, texture, and so on, which are routed to higher-level capsules via a new routing by agreement algorithm. This provides capsule networks with viewpoint invariance, which has previously evaded CNNs. This research presents a FR model based on capsule networks that was tested using the LFW dataset, COMSATS face dataset, and own acquired photos using cameras measuring 128 × 128 pixels, 40 × 40 pixels, and 30 × 30 pixels. The trained model outperforms state-of-the-art algorithms, achieving 95.82% test accuracy and performing well on unseen faces that have been blurred or rotated. Additionally, the suggested model outperformed the recently released approaches on the COMSATS face dataset, achieving a high accuracy of 92.47%. Based on the results of this research as well as previous results, capsule networks perform better than deeper CNNs on unobserved altered data because of their special equivariance properties.},
  doi      = {10.32604/cmc.2024.049645}
}



