
@article{cmes.2026.078347,
  author   = {Ksibi, Amel and Ayadi, Manel and Elmannai, Hela and Hamdi, Monia
              and Alluhaidan, Ala Saleh and Ksibi, Imen},
  title    = {{TransCP-Net}: Transformer-Based Spatiotemporal Pose Representation
              for Early Screening of Infant Cerebral Palsy},
  journal  = {Computer Modeling in Engineering \& Sciences},
  year     = {2026},
  issn     = {1526-1506},
  doi      = {10.32604/cmes.2026.078347},
  url      = {http://www.techscience.com/CMES/online/detail/26833},
  abstract = {Cerebral palsy is a prevalent neurodevelopmental syndrome that disrupts motor development in children, making early detection vital for effective intervention. Traditional clinical assessments rely on subjective observations, often missing minor motor abnormalities until they become severe, typically after 12 months of age. This article presents a novel deep learning model, TransCP-Net (Transformer-based Cerebral Palsy Network), designed for early detection of infant cerebral palsy through spatiotemporal pose representation learning. The architecture employs hierarchical spatial and temporal attention to analyze complex motion patterns in video sequences, integrating multi-modal data for improved accuracy. TransCP-Net incorporates specialized preprocessing, including temporal smoothing and trajectory encoding, to enhance feature learning. Tests on 1370 infant movement videos yielded impressive results: 94.7\% sensitivity, 92.3\% specificity, and an AUC-ROC of 0.968, outperforming ten state-of-the-art methods. Notably, it achieved a sensitivity of 96.3\% within the critical 9--15 weeks range of fidgety movements, enabling timely interventions. Attention visualization highlights key areas such as the hips and shoulders, reinforcing clinical relevance. TransCP-Net demonstrates effectiveness across diverse clinical settings, serving as a viable, non-invasive tool for early cerebral palsy detection.},
  internal-note = {Online-first entry: volume/number/pages not yet assigned by the
                   publisher (empty placeholder fields removed); year inferred from
                   the DOI suffix cmes.2026.078347 -- confirm once the issue is
                   assigned. Author names normalized from a comma-separated list
                   to the BibTeX "Last, First and ..." form; verify given/family
                   splits against the published paper.},
}



