@Article{cmc.2021.017441,
  AUTHOR = {Sunqiang Hu and Xiaoyu Li and Yu Deng and Yu Peng and Bin Lin and Shan Yang},
  TITLE = {A Semantic Supervision Method for Abstractive Summarization},
  JOURNAL = {Computers, Materials \& Continua},
  VOLUME = {69},
  YEAR = {2021},
  NUMBER = {1},
  PAGES = {145--158},
  URL = {http://www.techscience.com/cmc/v69n1/42755},
  ISSN = {1546-2226},
  ABSTRACT = {In recent years, many text summarization models based on pre-training methods have achieved very good results. However, in these models, semantic deviations easily occur between the original input representation and the representation produced by the multi-layer encoder, which may result in inconsistencies between the generated summary and the source text. Bidirectional Encoder Representations from Transformers (BERT) improves the performance of many tasks in Natural Language Processing (NLP). Although BERT has a strong capability to encode context, it lacks fine-grained semantic representation. To solve these two problems, we proposed a semantic supervision method based on a Capsule Network. Firstly, we extracted the fine-grained semantic representations of the input and of the BERT-encoded result with a Capsule Network. Secondly, we used the fine-grained semantic representation of the input to supervise the fine-grained semantic representation of the encoded result. Then we evaluated our model on a popular Chinese social media dataset (LCSTS), and the results showed that our model achieved higher ROUGE scores (including R-1 and R-2) and outperformed baseline systems. Finally, we conducted a comparative study on the stability of the model, and the experimental results showed that our model was more stable.},
  DOI = {10.32604/cmc.2021.017441}
}