
@article{cmc.2026.080444,
  author        = {Abdusalomov, Akmalbek and Mukhiddinov, Mukhriddin and Abdirazakov, Fakhriddin and Kutlimuratov, Alpamis and Alimova, Nodira and Kalandarov, Ilyos and Istanbullu, Ayhan and Nasimov, Rashid and Cho, Young-Im},
  title         = {A Prosody-Guided Multi-Stream Framework for Universal Detection of {AI}-Synthesized Speech across Codec and Vocoder Domains},
  journal       = {Computers, Materials \& Continua},
  year          = {2026},
  url           = {http://www.techscience.com/cmc/online/detail/26668},
  issn          = {1546-2226},
  abstract      = {Recent advancements in AI-synthesized speech have resulted in highly realistic deepfake audio, posing severe threats to authentication systems and digital media trust. Existing detection models struggle to generalize across diverse synthesis methods, especially those involving neural codec-based Audio Language Models (ALMs). In this work, we propose UniTector++, a novel prosody-aware, multi-stream detection architecture that generalizes across vocoder- and codec-based synthesis. UniTector++ incorporates three complementary streams---Whisper-based semantic embeddings, high-level prosodic features, and codec artifact representations---fused through a Multi-Domain Adaptive Graph Attention Fusion (MAGAF) module. Furthermore, an Emotion-Consistency Verification Module (ECVM) reinforces alignment between speech style and prosodic content, and a Universal Adversarial Robustness (UAR) head improves resistance against adversarial attacks. Evaluated on three benchmark datasets---ASVspoof2021, PolyFake, and Codecfake---UniTector++ achieves state-of-the-art performance with average Equal Error Rate (EER) of 0.57\% under unseen synthesis scenarios, outperforming competitive baselines by a relative margin of 28\%. Our results demonstrate the model's superior generalization, interpretability, and robustness, offering a significant advancement in universal deepfake speech detection.},
  doi           = {10.32604/cmc.2026.080444},
  note          = {Online first; volume, number, and pages not yet assigned},
  internal-note = {review: authors converted from comma-separated list to mandatory " and " separators; year inferred from DOI 10.32604/cmc.2026.080444 -- confirm against final publication; empty volume/number and the "{pages}" placeholder removed},
}



