
@article{cmes.2025.073021,
  author   = {Khan, Talha Farooq and Hussain, Majid and Arslan, Muhammad and Saeed, Muhammad and Khan, Lal and Chang, Hsien-Tsung},
  title    = {{LLM}-Based Enhanced Clustering for Low-Resource Language: An Empirical Study},
  journal  = {Computer Modeling in Engineering \& Sciences},
  year     = {2025},
  volume   = {145},
  number   = {3},
  pages    = {3883--3911},
  issn     = {1526-1506},
  doi      = {10.32604/cmes.2025.073021},
  url      = {http://www.techscience.com/CMES/v145n3/64984},
  abstract = {Text clustering is an important task because of its vital role in NLP-related tasks. However, existing research on clustering is mainly based on the English language, with limited work on low-resource languages, such as Urdu. Low-resource language text clustering has many drawbacks in the form of limited annotated collections and strong linguistic diversity. The primary aim of this paper is twofold: (1) By introducing a clustering dataset named UNC-2025 comprises 100k Urdu news documents, and (2) a detailed empirical standard of Large Language Model (LLM) improved clustering methods for Urdu text. We explicitly evaluate the behavior of the 11 multilingual and Urdu-specific embeddings on 3 different clustering algorithms. We carefully evaluated our performance based on a set of internal and external measurements of validity. We discover the best configuration of the mBERT embedding with the HDBSCAN algorithm that attains a new state-of-the-art performance with a high score of external validity of 0.95. This new LLM method has created a new strong standard of Urdu text clustering. Importantly, the results confirm the strength and high scalability of the LLM-generated embeddings towards the ability to generalise the fine, subtle semantics needed to discover topics in low-resource settings and open the door to novel NLP applications in underrepresented languages.},
}



