
@article{cmc.2024.052097,
  author   = {Tian, Chunwei and Gao, Haoyang and Wang, Pengwei and Zhang, Bob},
  title    = {An Enhanced {GAN} for Image Generation},
  journal  = {Computers, Materials \& Continua},
  year     = {2024},
  volume   = {80},
  number   = {1},
  pages    = {105--118},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2024.052097},
  url      = {http://www.techscience.com/cmc/v80n1/57408},
  abstract = {Generative adversarial networks (GANs) with gaming abilities have been widely applied in image generation. However, gamistic generators and discriminators may reduce the robustness of the obtained GANs in image generation under varying scenes. Enhancing the relation of hierarchical information in a generation network and enlarging differences of different network architectures can facilitate more structural information to improve the generation effect for image generation. In this paper, we propose an enhanced GAN via improving a generator for image generation (EIGGAN). EIGGAN applies a spatial attention to a generator to extract salient information to enhance the truthfulness of the generated images. Taking into relation the context account, parallel residual operations are fused into a generation network to extract more structural information from the different layers. Finally, a mixed loss function in a GAN is exploited to make a tradeoff between speed and accuracy to generate more realistic images. Experimental results show that the proposed method is superior to popular methods, i.e., Wasserstein GAN with gradient penalty (WGAN-GP) in terms of many indexes, i.e., Frechet Inception Distance, Learned Perceptual Image Patch Similarity, Multi-Scale Structural Similarity Index Measure, Kernel Inception Distance, Number of Statistically-Different Bins, Inception Score and some visual images for image generation.},
}



