
@Article{cmc.2025.063721,
AUTHOR = {Uddagiri Sirisha and Chanumolu Kiran Kumar and Revathi Durgam and Poluru Eswaraiah and G. Muni Nagamani},
TITLE = {An Analytical Review of Large Language Models Leveraging KDGI Fine-Tuning, Quantum Embeddings, and Multimodal Architectures},
JOURNAL = {Computers, Materials \& Continua},
VOLUME = {83},
YEAR = {2025},
NUMBER = {3},
PAGES = {4031--4059},
URL = {http://www.techscience.com/cmc/v83n3/61056},
ISSN = {1546-2226},
ABSTRACT = {The rising use of Large Language Models (LLMs) across disciplines calls for a complete examination of their strengths, problems, and applications. Current studies frequently focus on single use cases and lack a comprehensive understanding of LLM architectures' performance, strengths, and weaknesses. This gap hinders the selection of appropriate models for task-specific applications and limits awareness of emerging LLM optimization and deployment strategies. In this research, 50 studies on 25+ LLMs, including GPT-3, GPT-4, Claude 3.5, DeepKet, and hybrid multimodal frameworks like ContextDET and GeoRSCLIP, are thoroughly reviewed. We propose an LLM application taxonomy that groups techniques by task focus: healthcare, chemistry, sentiment analysis, agent-based simulations, and multimodal integration. Advanced methods such as parameter-efficient tuning (LoRA), quantum-enhanced embeddings (DeepKet), retrieval-augmented generation (RAG), and safety-focused models (GalaxyGPT) are evaluated for dataset requirements, computational efficiency, and performance measures. Frameworks for ethical issues, data-limited hallucinations, and KDGI-enhanced fine-tuning, such as Woodpecker's post-hoc corrections, are highlighted. The investigation's scope and methods are described before the principal findings are presented. The work reveals that domain-specialized, fine-tuned LLMs employing RAG and quantum-enhanced embeddings perform best in context-heavy applications. In medical text normalization, ChatGPT-4 outperforms previous models, while multimodal frameworks such as GeoRSCLIP improve remote-sensing performance. Parameter-efficient tuning techniques like LoRA achieve comparable performance at minimal computational cost, demonstrating the need for adaptive models across domains. The review aims to identify optimal domain-specific models, explain domain-specific fine-tuning, and present quantum and multimodal LLMs that address scalability and cross-domain issues. The resulting framework helps academics and practitioners identify, adapt, and innovate with LLMs for different purposes. This work advances research on efficient, interpretable, and ethical LLM applications.},
DOI = {10.32604/cmc.2025.063721}
}
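
A minimal usage sketch for this entry follows. Text outside @entries is ignored by BibTeX, so this note can live in the .bib file; the file name refs.bib and the surrounding document are assumptions for illustration, not part of the entry itself.

  % main.tex -- cites the entry above; assumes this file is saved as refs.bib
  \documentclass{article}
  \begin{document}
  For a survey of KDGI fine-tuning, quantum embeddings, and multimodal
  LLM architectures, see~\cite{cmc.2025.063721}.
  \bibliographystyle{plain}  % any BibTeX style works here
  \bibliography{refs}        % refs.bib is an illustrative file name
  \end{document}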
