
@Article{cmc.2026.077087,
AUTHOR = {Nahdi Saubari and Kunfeng Wang and Rachmat Muwardi and Andri Pranolo},
TITLE = {Improving Convolutional Neural Network Performance Using Alpha-Based Adaptive Pooling for Image Classification},
JOURNAL = {Computers, Materials \& Continua},
VOLUME = {87},
YEAR = {2026},
NUMBER = {3},
PAGES = {--},
URL = {http://www.techscience.com/cmc/v87n3/66949},
ISSN = {1546-2226},
ABSTRACT = {This study proposes an Adaptive Pooling method based on an alpha (α) parameter to enhance the effectiveness and stability of convolutional neural networks (CNNs) in image classification tasks. Conventional pooling techniques, such as max pooling and average pooling, often exhibit limited adaptability when applied to datasets with heterogeneous distributions and varying levels of complexity. To address this limitation, the proposed approach introduces an α parameter ranging from 0 to 1 that continuously regulates the contribution of maximum-based and average-based pooling operations in a unified and flexible framework. The proposed method is evaluated using two benchmark datasets, MNIST and CIFAR-10, representing grayscale and color image classification scenarios, respectively. Experiments are conducted across three CNN architectures of different depths (LeNet-5, a deeper custom-built CNN, and ResNet-18) to assess robustness under varying representational capacity. Under the best α setting with a 4 × 4 pooling configuration, Adaptive Pooling exhibits architecture-dependent behavior. On LeNet-5, Adaptive Pooling achieves 87.2% on MNIST and 30.1% on CIFAR-10, compared with 97.8% (max/average pooling) on MNIST and 60.1% (max pooling)/53.9% (average pooling) on CIFAR-10. In contrast, on the deeper custom CNN, Adaptive Pooling becomes competitive, reaching 99.7% on MNIST and 86.1% on CIFAR-10, which is comparable to the 99.6%–99.7% on MNIST and 84.5%–86.2% on CIFAR-10 achieved by conventional pooling. On ResNet-18, Adaptive Pooling attains 99.1% on MNIST, while CIFAR-10 performance decreases to 37.2% relative to the default global average pooling baseline (99.7% on MNIST and 89.0% on CIFAR-10), suggesting that performance also depends on where the pooling replacement is applied. Overall, these findings indicate that α-controlled Adaptive Pooling provides a lightweight and configurable pooling strategy that can improve stability and achieve competitive accuracy in deeper CNNs, although it should be treated as a complementary mechanism rather than a universal replacement across all architectures.},
DOI = {10.32604/cmc.2026.077087}
}
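
The abstract describes α-controlled Adaptive Pooling as a continuous blend of max- and average-based pooling. Below is a minimal, hypothetical PyTorch sketch of that idea (not the authors' released code): it assumes the blend is the convex combination α·max + (1−α)·avg over each pooling window, and the module name AlphaAdaptivePool2d, the fixed (non-learnable) α, and the default values are illustrative assumptions only.

import torch
import torch.nn as nn
import torch.nn.functional as F

class AlphaAdaptivePool2d(nn.Module):
    """Sketch of alpha-blended pooling: alpha in [0, 1].

    Assumed form from the abstract: alpha = 1 recovers max pooling,
    alpha = 0 recovers average pooling; intermediate values mix both.
    """
    def __init__(self, kernel_size=4, stride=None, alpha=0.5):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride if stride is not None else kernel_size
        self.alpha = alpha

    def forward(self, x):
        # Compute both pooled maps over the same windows, then blend.
        mx = F.max_pool2d(x, self.kernel_size, self.stride)
        av = F.avg_pool2d(x, self.kernel_size, self.stride)
        return self.alpha * mx + (1.0 - self.alpha) * av

# Usage example with the 4 x 4 pooling configuration reported in the paper;
# the alpha value here is arbitrary, not the paper's best setting.
pool = AlphaAdaptivePool2d(kernel_size=4, alpha=0.7)
y = pool(torch.randn(1, 16, 28, 28))  # -> shape (1, 16, 7, 7)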



