
@article{cmes.2026.074428,
  author   = {Lee, Mangyu and Jeong, Jaekyun and Choo, Yun Wook and Han, Keejun and Kim, Jungeun},
  title    = {{EDTM}: Efficient Domain Transition for Multi-Source Domain Adaptation},
  journal  = {Computer Modeling in Engineering \& Sciences},
  volume   = {146},
  number   = {2},
  year     = {2026},
  issn     = {1526-1506},
  url      = {http://www.techscience.com/CMES/v146n2/66301},
  doi      = {10.32604/cmes.2026.074428},
  abstract = {Domain adaptation aims to reduce the distribution gap between the training data (source domain) and the target data. This enables effective predictions even for domains not seen during training. However, most conventional domain adaptation methods assume a single source domain, making them less suitable for modern deep learning settings that rely on diverse and large-scale datasets. To address this limitation, recent research has focused on Multi-Source Domain Adaptation (MSDA), which aims to learn effectively from multiple source domains. In this paper, we propose Efficient Domain Transition for Multi-source (EDTM), a novel and efficient framework designed to tackle two major challenges in existing MSDA approaches: (1) integrating knowledge across different source domains and (2) aligning label distributions between source and target domains. EDTM leverages an ensemble-based classifier expert mechanism to enhance the contribution of source domains that are more similar to the target domain. To further stabilize the learning process and improve performance, we incorporate imitation learning into the training of the target model. In addition, Maximum Classifier Discrepancy (MCD) is employed to align class-wise label distributions between the source and target domains. Experiments were conducted using Digits-Five, one of the most representative benchmark datasets for MSDA. The results show that EDTM consistently outperforms existing methods in terms of average classification accuracy. Notably, EDTM achieved significantly higher performance on target domains such as Modified National Institute of Standards and Technology with blended background images (MNIST-M) and Street View House Numbers (SVHN) datasets, demonstrating enhanced generalization compared to baseline approaches.
Furthermore, an ablation study analyzing the contribution of each loss component validated the effectiveness of the framework, highlighting the importance of each module in achieving optimal performance.},
}



