
@article{cmc.2026.074398,
  author   = {Chu, Thi Minh Hue and Nguyen, Minh Quy},
  title    = {A {PPO}-Based {DRL} Approach for Scalable Communication in Civilian {UAV} Networks},
  journal  = {Computers, Materials \& Continua},
  volume   = {87},
  number   = {2},
  year     = {2026},
  issn     = {1546-2226},
  url      = {http://www.techscience.com/cmc/v87n2/66587},
  doi      = {10.32604/cmc.2026.074398},
  abstract = {Nowadays, Unmanned Aerial Vehicles (UAVs) are making increasingly important contributions to numerous applications that enhance human quality of life, such as sensing and data collection, computing, and communication. However, communication between UAVs still faces challenges due to high-dynamic topology, volatile wireless links, and strict energy budgets. In this work, we introduce an improved communication scheme, namely Proximal Policy Optimization (PPO). Our solution casts hop-by-hop relay selection as a Markov decision process and develops a decentralized Proximal Policy Optimization framework in an actor--critic form. A key novelty is the design of the reward function, which jointly considers the delivery ratio, end-to-end delay, and energy efficiency, enabling flexible prioritization in dynamic environments. The simulation results across swarms of 20--70 UAVs show that the proposed framework enhances delivery ratio to 5\% over a Deep Q-Network baseline (reaching $\approx 80\%$ at 70 nodes), reduces latency by about 2--3 ms in medium-to-dense settings (from $\sim 43$ to 35--36 ms), and attains comparable or slightly lower total energy consumption (typically 0.5\%--2\% lower). The results indicate that the proposed communication scheme, adaptive and scalable learning-based UAV scenarios, paves the way for real-world UAV deployments.},
}



