
@article{iasc.2021.013795,
  author   = {Zhang, Yajun and Yi, Jianjun and Zhang, Jiahao and Chen, Yuanhao and He, Liang},
  title    = {Generation of Synthetic Images of Randomly Stacked Object Scenes for Network Training Applications},
  journal  = {Intelligent Automation \& Soft Computing},
  year     = {2021},
  volume   = {27},
  number   = {2},
  pages    = {425--439},
  doi      = {10.32604/iasc.2021.013795},
  issn     = {2326-005X},
  url      = {http://www.techscience.com/iasc/v27n2/41245},
  abstract = {Image recognition algorithms based on deep learning have been widely developed in recent years owing to their capability of automatically capturing recognition features from image datasets and constantly improving the accuracy and efficiency of the image recognition process. However, the task of training deep learning networks is time-consuming and expensive because large training datasets are generally required, and extensive manpower is needed to annotate each of the images in the training dataset to support the supervised learning process. This task is particularly arduous when the image scenes involve randomly stacked objects. The present work addresses this issue by developing a synthetic training dataset generation method based on OpenGL and the Bullet physics engine which can automatically generate annotated synthetic datasets by simulating the freefall of a collection of objects under the force of gravity. Rigorous statistical comparison of a real image dataset of stacked scenes with a synthetic image dataset generated by the proposed approach demonstrates that the two datasets exhibit no significant differences. Moreover, the object detection performances obtained by three popular network architectures trained using the synthetic dataset generated by the proposed approach are demonstrated to be much better than the results of training conducted using a synthetic dataset generated by a conventional cut and paste approach, and these performances are also competitive with the results of training conducted using a dataset composed of real images.},
}



