
@Article{cmc.2024.050790,
AUTHOR = {Dinh Phu Cuong Le and Dong Wang and Viet-Tuan Le},
TITLE = {A Comprehensive Survey of Recent Transformers in Image, Video, and Diffusion Models},
JOURNAL = {Computers, Materials \& Continua},
VOLUME = {80},
YEAR = {2024},
NUMBER = {1},
PAGES = {37--60},
URL = {http://www.techscience.com/cmc/v80n1/57376},
ISSN = {1546-2226},
ABSTRACT = {Transformer models have emerged as dominant networks for various tasks in computer vision compared to Convolutional Neural Networks (CNNs). The transformers demonstrate the ability to model long-range dependencies by utilizing a self-attention mechanism. This study aims to provide a comprehensive survey of recent transformer-based approaches in image and video applications, as well as diffusion models. We begin by discussing existing surveys of vision transformers and comparing them to this work. Then, we review the main components of a vanilla transformer network, including the self-attention mechanism, feed-forward network, position encoding, etc. In the main part of this survey, we review recent transformer-based models in three categories: Transformer for downstream tasks, Vision Transformer for Generation, and Vision Transformer for Segmentation. We also provide a comprehensive overview of recent transformer models for video tasks and diffusion models. We compare the performance of various hierarchical transformer networks for multiple tasks on popular benchmark datasets. Finally, we explore some future research directions to further improve the field.},
DOI = {10.32604/cmc.2024.050790}
}
