
@Article{jai.2025.072531,
AUTHOR = {Palak Bari and Gurnur Bedi and Khushi Joshi and Anupama Jawale},
TITLE = {Why Transformers Outperform LSTMs: A Comparative Study on Sarcasm Detection},
JOURNAL = {Journal on Artificial Intelligence},
VOLUME = {7},
YEAR = {2025},
NUMBER = {1},
PAGES = {499--508},
URL = {http://www.techscience.com/jai/v7n1/64530},
ISSN = {2579-003X},
ABSTRACT = {This study investigates sarcasm detection in text using a dataset of 8095 sentences compiled from MUStARD and HuggingFace repositories, balanced across sarcastic and non-sarcastic classes. A sequential baseline model (LSTM) is compared with transformer-based models (RoBERTa and XLNet) that integrate attention mechanisms. Transformers were chosen for their proven ability to capture long-range contextual dependencies, whereas the LSTM serves as a traditional benchmark for sequential modeling. Experimental results show that RoBERTa achieves 0.87 accuracy, XLNet 0.83, and LSTM 0.52. These findings confirm that transformer architectures significantly outperform recurrent models in sarcasm detection. Future work will incorporate multimodal features and error analysis to further improve robustness.},
DOI = {10.32604/jai.2025.072531}
}



