
@article{cmc.2025.069175,
  author   = {Liu, Panyu and Zhou, Tongqing and Lu, Guofeng and Zhou, Huaizhe and Cai, Zhiping},
  title    = {Division in Unity: Towards Efficient and Privacy-Preserving Learning of Healthcare Data},
  journal  = {Computers, Materials \& Continua},
  year     = {2025},
  volume   = {85},
  number   = {2},
  pages    = {2913--2934},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2025.069175},
  url      = {http://www.techscience.com/cmc/v85n2/63851},
  abstract = {The isolation of healthcare data among worldwide hospitals and institutes forms barriers for fully realizing the data-hungry artificial intelligence (AI) models promises in renewing medical services. To overcome this, privacy-preserving distributed learning frameworks, represented by swarm learning and federated learning, have been investigated recently with the sensitive healthcare data retaining in its local premises. However, existing frameworks use a one-size-fits-all mode that tunes one model for all healthcare situations, which could hardly fit the usually diverse disease prediction in practice. This work introduces the idea of ensemble learning into privacy-preserving distributed learning and presents the En-split framework, where the predictions of multiple expert models with specialized diagnostic capabilities are jointly explored. Considering the exacerbation of communication and computation burdens with multiple models during learning, model split is used to partition targeted models into two parts, with hospitals focusing on building the feature-enriched shallow layers. Meanwhile, dedicated noises are implemented to the edge layers for differential privacy protection. Experiments on two public datasets demonstrate En-split's superior performance on accuracy and efficiency, compared with existing distributed learning frameworks.},
}



