
@article{cmc.2026.074244,
  author   = {Ding, Zhe and Yi, Hao and Xie, Wenrui and Zhang, Ming and Xiao, Yuxuan and Wang, Qixu and Chen, Qing and Qin, Zhiguang and Chen, Dajiang},
  title    = {Federated Semi-Supervised Learning Based on Feature Space Fusion},
  journal  = {Computers, Materials \& Continua},
  year     = {2026},
  volume   = {87},
  number   = {2},
  url      = {http://www.techscience.com/cmc/v87n2/66580},
  issn     = {1546-2226},
  abstract = {Federated semi-supervised learning (FSSL) has garnered substantial attention for enabling collaborative global model training across multiple clients to address the scarcity of labeled data and to preserve data privacy. However, FSSL is plagued by formidable challenges stemming from cross-client data heterogeneity, as existing methods fail to achieve effective fusion of feature subspaces across distinct clients. To address this issue, we propose a novel FSSL framework, named FedSPQR, which is explicitly tailored for the label-at-server scenario. On the server side, FedSPQR adopts subspace clustering and fusion method based on the Grassmann manifold to construct a unified global feature space, which is further leveraged to refine the global model. On the client side, the pre-established global feature space acts as a benchmark for aligning the local feature subspaces. Based on the aligned local feature subspaces, integrating self-supervised learning with knowledge distillation facilitates effective local learning to alleviate local bias caused by data heterogeneity. Extensive experiments on two standard public benchmarks confirm that FedSPQR outperforms state-of-the-art (SOTA) baselines by a significant margin.},
  doi      = {10.32604/cmc.2026.074244},
}



