
@article{cmes.2026.074283,
  author   = {Siddiqui, Abdul Jabbar and Bello, Saheed Ademola and Gambo, Muhammad Liman and Saudagar, Abdul Khader Jilani and Alawad, Mohamad A. and Hussain, Amir},
  title    = {{CANNSkin}: A Convolutional Autoencoder Neural Network-Based Model for Skin Cancer Classification},
  journal  = {Computer Modeling in Engineering \& Sciences},
  year     = {2026},
  volume   = {146},
  number   = {2},
  issn     = {1526-1506},
  doi      = {10.32604/cmes.2026.074283},
  url      = {http://www.techscience.com/CMES/v146n2/66300},
  abstract = {Visual diagnosis of skin cancer is challenging due to subtle inter-class similarities, variations in skin texture, the presence of hair, and inconsistent illumination. Deep learning models have shown promise in assisting early detection, yet their performance is often limited by the severe class imbalance present in dermoscopic datasets. This paper proposes CANNSkin, a skin cancer classification framework that integrates a convolutional autoencoder with latent-space oversampling to address this imbalance. The autoencoder is trained to reconstruct lesion images, and its latent embeddings are used as features for classification. To enhance minority-class representation, the Synthetic Minority Oversampling Technique (SMOTE) is applied directly to the latent vectors before classifier training. The encoder and classifier are first trained independently and later fine-tuned end-to-end. On the HAM10000 dataset, CANNSkin achieves an accuracy of 93.01\%, a macro-F1 of 88.54\%, and an ROC--AUC of 98.44\%, demonstrating strong robustness across ten test subsets. Evaluation on the more complex ISIC 2019 dataset further confirms the model's effectiveness, where CANNSkin achieves 94.27\% accuracy, 93.95\% precision, 94.09\% recall, and 99.02\% F1-score, supported by high reconstruction fidelity (PSNR 35.03 dB, SSIM 0.86). These results demonstrate the effectiveness of our proposed latent-space balancing and fine-tuned representation learning as a new benchmark method for robust and accurate skin cancer classification across heterogeneous datasets.},
}



