
@article{cmc.2025.066532,
  author   = {Zhang, Kejun and Li, Meijiao and Cheng, Jiahao and Wang, Jun and Yang, Ying},
  title    = {{VRCL}: A Discrimination Detection Method for Multilingual and Multimodal Information},
  journal  = {Computers, Materials \& Continua},
  year     = {2025},
  volume   = {85},
  number   = {1},
  pages    = {1019--1035},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2025.066532},
  url      = {http://www.techscience.com/cmc/v85n1/63542},
  abstract = {With the rapid growth of the Internet and social media, information is widely disseminated in multimodal forms, such as text and images, where discriminatory content can manifest in various ways. Discrimination detection techniques for multilingual and multimodal data can identify potential discriminatory behavior and help foster a more equitable and inclusive cyberspace. However, existing methods often struggle in complex contexts and multilingual environments. To address these challenges, this paper proposes an innovative detection method, using image and multilingual text encoders to separately extract features from different modalities. It continuously updates a historical feature memory bank, aggregates the Top-K most similar samples, and utilizes a Gated Recurrent Unit (GRU) to integrate current and historical features, generating enhanced feature representations with stronger semantic expressiveness to improve the model's ability to capture discriminatory signals. Experimental results demonstrate that the proposed method exhibits superior discriminative power and detection accuracy in multilingual and multimodal contexts, offering a reliable and effective solution for identifying discriminatory content.},
}



