
@article{cmc.2025.064849,
  author   = {Sun, Rongbo and Fei, Jinlong and Zhu, Yuefei and Guo, Zhongyu},
  title    = {Multi-Agent Reinforcement Learning for {Moving Target Defense} Temporal Decision-Making Approach Based on {Stackelberg-FlipIt} Games},
  journal  = {Computers, Materials \& Continua},
  year     = {2025},
  volume   = {84},
  number   = {2},
  pages    = {3765--3786},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2025.064849},
  url      = {http://www.techscience.com/cmc/v84n2/62896},
  abstract = {Moving Target Defense (MTD) necessitates scientifically effective decision-making methodologies for defensive technology implementation. While most MTD decision studies focus on accurately identifying optimal strategies, the issue of optimal defense timing remains underexplored. Current default approaches---periodic or overly frequent MTD triggers---lead to suboptimal trade-offs among system security, performance, and cost. The timing of MTD strategy activation critically impacts both defensive efficacy and operational overhead, yet existing frameworks inadequately address this temporal dimension. To bridge this gap, this paper proposes a Stackelberg-FlipIt game model that formalizes asymmetric cyber conflicts as alternating control over attack surfaces, thereby capturing the dynamic security state evolution of MTD systems. We introduce a belief factor to quantify information asymmetry during adversarial interactions, enhancing the precision of MTD trigger timing. Leveraging this game-theoretic foundation, we employ Multi-Agent Reinforcement Learning (MARL) to derive adaptive temporal strategies, optimized via a novel four-dimensional reward function that holistically balances security, performance, cost, and timing. Experimental validation using IP address mutation against scanning attacks demonstrates stable strategy convergence and accelerated defense response, significantly improving cybersecurity affordability and effectiveness.},
}



