
@article{cmc.2020.06478,
  author   = {Zhu, Dongjie and Du, Haiwen and Sun, Yundong and Li, Xiaofang and Qu, Rongning and Hu, Hao and Dong, Shuangshuang and Zhou, Helen Min and Cao, Ning},
  title    = {Massive Files Prefetching Model Based on {LSTM} Neural Network with Cache Transaction Strategy},
  journal  = {Computers, Materials \& Continua},
  volume   = {63},
  number   = {2},
  pages    = {979--993},
  year     = {2020},
  issn     = {1546-2226},
  url      = {http://www.techscience.com/cmc/v63n2/38555},
  doi      = {10.32604/cmc.2020.06478},
  abstract = {In distributed storage systems, file access efficiency has an important impact
on the real-time nature of information forensics. As a popular approach to improve file
accessing efficiency, prefetching model can fetches data before it is needed according to
the file access pattern, which can reduce the I/O waiting time and increase the system
concurrency. However, prefetching model needs to mine the degree of association
between files to ensure the accuracy of prefetching. In the massive small file situation,
the sheer volume of files poses a challenge to the efficiency and accuracy of relevance
mining. In this paper, we propose a massive files prefetching model based on LSTM
neural network with cache transaction strategy to improve file access efficiency. Firstly,
we propose a file clustering algorithm based on temporal locality and spatial locality to
reduce the computational complexity. Secondly, we propose a definition of cache
transaction according to files occurrence in cache instead of time-offset distance based
methods to extract file block feature accurately. Lastly, we innovatively propose a file
access prediction algorithm based on LSTM neural network which predict the file that
have high possibility to be accessed. Experiments show that compared with the
traditional LRU and the plain grouping methods, the proposed model notably increase the
cache hit rate and effectively reduces the I/O wait time.},
}



