@Article{cmes.2022.022045,
  AUTHOR   = {Zhiyun Yang and Qi Liu and Hao Wu and Xiaodong Liu and Yonghong Zhang},
  TITLE    = {CEMA-LSTM: Enhancing Contextual Feature Correlation for Radar Extrapolation Using Fine-Grained Echo Datasets},
  JOURNAL  = {Computer Modeling in Engineering \& Sciences},
  VOLUME   = {135},
  YEAR     = {2023},
  NUMBER   = {1},
  PAGES    = {45--64},
  URL      = {http://www.techscience.com/CMES/v135n1/50092},
  ISSN     = {1526-1506},
  ABSTRACT = {Accurate precipitation nowcasting can provide great convenience to the public, who can then make arrangements in advance to deal with the possible impact of upcoming heavy rain. Recent research has focused on various deep learning models for radar echo extrapolation, in which radar echo maps are used to predict subsequent frames so as to recognize potential severe convective weather events. However, these approaches suffer from inaccurate prediction of echo dynamics and unreliable depiction of echo aggregation or dissipation, due to the limited size of convolution filters, the lack of global features, and insufficient attention to features from previous states. To address these problems, this paper proposes a CEMA-LSTM recurrent unit, which embeds a Contextual Feature Correlation Enhancement Block (CEB) and a Multi-Attention Mechanism Block (MAB). The CEB enhances contextual feature correlation and helps the model memorize significant features for near-future prediction; the MAB uses position and channel attention mechanisms to capture global features of radar echoes. Two practical radar echo datasets were used: FREM and CIKM 2017. Both quantitative and visual comparisons of experimental results demonstrate that the proposed CEMA-LSTM outperforms recent models such as PhyDNet, MIM, and PredRNN++. In particular, compared with the second-ranked model, its average POD, FAR, and CSI improved by 3.87%, 1.65%, and 1.79%, respectively, on FREM, and by 1.42%, 5.60%, and 3.16%, respectively, on CIKM 2017.},
  DOI      = {10.32604/cmes.2022.022045}
}