
@article{cmc.2020.09840,
  % Authors must be separated by " and " (never commas); "Last, First" form is
  % unambiguous for BibTeX's name parser.
  author   = {Yuan, Ningcheng and Jia, Chao and Lu, Jizhao and Guo, Shaoyong and Li, Wencui and Qiu, Xuesong and Shi, Lei},
  % {DRL} braced so sentence-casing styles keep the acronym capitalised.
  title    = {A {DRL}-Based Container Placement Scheme with Auxiliary Tasks},
  journal  = {Computers, Materials \& Continua},
  volume   = {64},
  number   = {3},
  pages    = {1657--1671},
  year     = {2020},
  issn     = {1546-2226},
  % Bare DOI (no resolver prefix); preferred over the URL for persistence.
  doi      = {10.32604/cmc.2020.09840},
  url      = {http://www.techscience.com/cmc/v64n3/39451},
  % Abstract reproduced verbatim from the publisher (including the original
  % "dose not" typo) -- it is quoted content, not ours to edit.
  abstract = {Container is an emerging virtualization technology and widely adopted in the 
cloud to provide services because of its lightweight, flexible, isolated and highly portable 
properties. Cloud services are often instantiated as clusters of interconnected containers. 
Due to the stochastic service arrival and complicated cloud environment, it is challenging 
to achieve an optimal container placement (CP) scheme. We propose to leverage Deep 
Reinforcement Learning (DRL) for solving CP problem, which is able to learn from 
experience interacting with the environment and does not rely on mathematical model or 
prior knowledge. However, applying DRL method directly dose not lead to a satisfying 
result because of sophisticated environment states and huge action spaces. In this paper, 
we propose UNREAL-CP, a DRL-based method to place container instances on servers 
while considering end to end delay and resource utilization cost. The proposed method is 
an actor-critic-based approach, which has advantages in dealing with the huge action 
space. Moreover, the idea of auxiliary learning is also included in our architecture. We 
design two auxiliary learning tasks about load balancing to improve algorithm 
performance. Compared to other DRL methods, extensive simulation results show that 
UNREAL-CP performs better up to 28.6% in terms of reducing delay and deployment 
cost with high training efficiency and responding speed.},
}



