
@article{ee.2025.073912,
  author        = {Zhang, Ximing and Li, Zhuohuan and Quan, Xuexia and Cheng, Kai and Yu, Yang},
  title         = {Curriculum-Learning-Guided Multi-Agent Deep Reinforcement Learning for {N-1} Static Security Prevention and Control},
  journal       = {Energy Engineering},
  year          = {2025},
  url           = {http://www.techscience.com/energy/online/detail/25430},
  issn          = {1546-0118},
  doi           = {10.32604/ee.2025.073912},
  abstract      = {The “N-1” criterion represents a fundamental principle for assessing the reliability of power systems in static security analysis. Existing studies mainly rely on centralized single-agent reinforcement learning frameworks, where centralized control is difficult to cope with regional autonomy and communication delays. In high-dimensional state–action spaces, these approaches often suffer from low efficiency and unstable policies, limiting their applicability to large-scale grids. To address these issues, this paper proposes a Multi-Agent Deep Reinforcement Learning (MADRL) method enhanced with Curriculum Learning (CL) and Prioritized Experience Replay (PER). The proposed framework adopts a Centralized Training with Decentralized Execution (CTDE) paradigm, where independent agents are assigned to different system regions to enable autonomous decision-making and interregional coordination. In addition, the Actor–Critic (AC) architecture is refined with optimized value update rules to mitigate Q-value overestimation. A curriculum learning mechanism based on source–load fluctuation intensity further guides agents from simple to complex operating conditions, enhancing convergence and policy robustness. Simulation results on the IEEE 39-bus system demonstrate that the proposed method efficiently generates coordinated multi-region control strategies, eliminates voltage and current violations under N-1 contingencies, and consistently outperforms the baseline MADRL approach in terms of decision performance and robustness under fluctuating source–load scenarios.},
  internal-note = {Online-first article: volume, number, and pages not yet assigned (original export had empty volume/number and an unfilled {pages} placeholder) — fill in once the issue is published. Year 2025 taken from the DOI/citation key.},
}



