
@article{cmc.2024.059018,
  author   = {Tong, Xin and Wang, Jingya and Yang, Ying and Peng, Tian and Zhai, Hanming and Ling, Guangming},
  title    = {{LEGF-DST}: {LLMs}-Enhanced Graph-Fusion Dual-Stream {Transformer} for Fine-Grained {Chinese} Malicious {SMS} Detection},
  journal  = {Computers, Materials \& Continua},
  year     = {2025},
  volume   = {82},
  number   = {2},
  pages    = {1901--1924},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2024.059018},
  url      = {http://www.techscience.com/cmc/v82n2/59488},
  abstract = {With the widespread use of SMS (Short Message Service), the proliferation of malicious SMS has emerged as a pressing societal issue. While deep learning-based text classifiers offer promise, they often exhibit suboptimal performance in fine-grained detection tasks, primarily due to imbalanced datasets and insufficient model representation capabilities. To address this challenge, this paper proposes an LLMs-enhanced graph fusion dual-stream Transformer model for fine-grained Chinese malicious SMS detection. During the data processing stage, Large Language Models (LLMs) are employed for data augmentation, mitigating dataset imbalance. In the data input stage, both word-level and character-level features are utilized as model inputs, enhancing the richness of features and preventing information loss. A dual-stream Transformer serves as the backbone network in the learning representation stage, complemented by a graph-based feature fusion mechanism. At the output stage, both supervised classification cross-entropy loss and supervised contrastive learning loss are used as multi-task optimization objectives, further enhancing the model's feature representation. Experimental results demonstrate that the proposed method significantly outperforms baselines on a publicly available Chinese malicious SMS dataset.},
}



