
@article{jai.2020.09738,
  author   = {Fang, Kun and Ouyang, Jianquan},
  title    = {Classification Algorithm Optimization Based on {Triple-GAN}},
  journal  = {Journal on Artificial Intelligence},
  volume   = {2},
  number   = {1},
  pages    = {1--15},
  year     = {2020},
  issn     = {2579-003X},
  doi      = {10.32604/jai.2020.09738},
  url      = {http://www.techscience.com/jai/v2n1/39511},
  abstract = {Generating an Adversarial network (GAN) has shown great development 
prospects in image generation and semi-supervised learning and has evolved into TripleGAN. However, there are still two problems that need to be solved in Triple-GAN: based 
on the KL divergence distribution structure, gradients are easy to disappear and training 
instability occurs. Since Triple-GAN tags the samples manually, the manual marking 
workload is too large. Marked uneven and so on. This article builds on this improved 
Triple-GAN model (Improved Triple-GAN), which uses Random Forests to classify real 
samples, automate tagging of leaf nodes, and use Least Squares Generative Adversarial 
Networks (LSGAN) ideological structure loss function to avoid gradients disappear. 
Experiments were performed on the Improved Triple-GAN model and the Triple-GAN 
model using the MINIST, cifar10 and cifar100 datasets respectively, experiments show 
that the error rate of generated samples is greatly reduced. At the same time, the 
classification effect of the data set and the sharpness of the samples are greatly improved. 
And it has greatly improved the stability of model training and automation of labels.},
}



