
@article{cmes.2024.051335,
  author   = {Das, Khushal and Abid, Fazeel and Rasheed, Jawad and {Kamlish} and Asuroglu, Tunc and Alsubai, Shtwai and Soomro, Safeeullah},
  title    = {Enhancing Communication Accessibility: {UrSL-CNN} Approach to {Urdu} Sign Language Translation for Hearing-Impaired Individuals},
  journal  = {Computer Modeling in Engineering \& Sciences},
  volume   = {141},
  year     = {2024},
  number   = {1},
  pages    = {689--711},
  url      = {http://www.techscience.com/CMES/v141n1/57686},
  issn     = {1526-1506},
  abstract = {Deaf people or people facing hearing issues can communicate using sign language (SL), a visual language. Many works based on rich source language have been proposed; however, the work using poor resource language is still lacking. Unlike other SLs, the visuals of the Urdu Language are different. This study presents a novel approach to translating Urdu sign language (UrSL) using the UrSL-CNN model, a convolutional neural network (CNN) architecture specifically designed for this purpose. Unlike existing works that primarily focus on languages with rich resources, this study addresses the challenge of translating a sign language with limited resources. We conducted experiments using two datasets containing 1500 and 78,000 images, employing a methodology comprising four modules: data collection, pre-processing, categorization, and prediction. To enhance prediction accuracy, each sign image was transformed into a greyscale image and underwent noise filtering. Comparative analysis with machine learning baseline methods (support vector machine, Gaussian Naive Bayes, random forest, and k-nearest neighbors' algorithm) on the UrSL alphabets dataset demonstrated the superiority of UrSL-CNN, achieving an accuracy of 0.95. Additionally, our model exhibited superior performance in Precision, Recall, and F1-score evaluations. This work not only contributes to advancing sign language translation but also holds promise for improving communication accessibility for individuals with hearing impairments.},
  doi      = {10.32604/cmes.2024.051335}
}



