
@article{cmes.2023.024018,
  author   = {Chang, Bao Rong and Tsai, Hsiu-Fen and Chou, Han-Lin},
  title    = {Implementation of Rapid Code Transformation Process Using Deep Learning Approaches},
  journal  = {Computer Modeling in Engineering \& Sciences},
  volume   = {136},
  number   = {1},
  pages    = {107--134},
  year     = {2023},
  issn     = {1526-1506},
  doi      = {10.32604/cmes.2023.024018},
  url      = {http://www.techscience.com/CMES/v136n1/51193},
  abstract = {Our previous work has introduced the newly generated program using the code transformation model GPT-2, verifying the generated programming codes through simhash (SH) and longest common subsequence (LCS) algorithms. However, the entire code transformation process has encountered a time-consuming problem. Therefore, the objective of this study is to speed up the code transformation process significantly. This paper has proposed deep learning approaches for modifying SH using a variational simhash (VSH) algorithm and replacing LCS with a piecewise longest common subsequence (PLCS) algorithm to faster the verification process in the test phase. Besides the code transformation model GPT-2, this study has also introduced Microsoft MASS and Facebook BART for a comparative analysis of their performance. Meanwhile, the explainable AI technique using local interpretable model-agnostic explanations (LIME) can also interpret the decision-making of AI models. The experimental results show that VSH can reduce the number of qualified programs by 22.11\%, and PLCS can reduce the execution time of selected pocket programs by 32.39\%. As a result, the proposed approaches can significantly speed up the entire code transformation process by 1.38 times on average compared with our previous work.},
}



