
@Article{cmc.2020.06130,
AUTHOR = {He, Shiming and Li, Zhuozhou and Tang, Yangning and Liao, Zhuofan and Li, Feng and Lim, Se-Jung},
TITLE = {Parameters Compressing in Deep Learning},
JOURNAL = {Computers, Materials \& Continua},
VOLUME = {62},
YEAR = {2020},
NUMBER = {1},
PAGES = {321--336},
URL = {http://www.techscience.com/cmc/v62n1/38115},
ISSN = {1546-2226},
ABSTRACT = {With the popularity of deep learning tools in image decomposition and natural
language processing, how to support and store a large number of parameters required by
deep learning algorithms has become an urgent problem to be solved. These parameters
are huge and can be as many as millions. At present, a feasible direction is to use the
sparse representation technique to compress the parameter matrix to achieve the purpose
of reducing parameters and reducing the storage pressure. These methods include matrix
decomposition and tensor decomposition. To let vector take advance of the compressing
performance of matrix decomposition and tensor decomposition, we use reshaping and
unfolding to let vector be the input and output of Tensor-Factorized Neural Networks.
We analyze how reshaping can get the best compress ratio. According to the relationship
between the shape of tensor and the number of parameters, we get a lower bound of the
number of parameters. We take some data sets to verify the lower bound.},
DOI = {10.32604/cmc.2020.06130}
}



