
@article{cmc.2025.070328,
  author        = {Liu, Wei and Wang, Ruiyang and Liu, Guangwei},
  title         = {Adaptive Path-Planning for Autonomous Robots: A {UCH}-Enhanced {Q}-Learning Approach},
  journal       = {Computers, Materials \& Continua},
  volume        = {86},
  number        = {2},
  pages         = {1--23},
  year          = {2026},
  issn          = {1546-2226},
  doi           = {10.32604/cmc.2025.070328},
  url           = {http://www.techscience.com/cmc/v86n2/64747},
  abstract      = {Q-learning is a classical reinforcement learning method with broad applicability. It can respond effectively to environmental changes and provide flexible strategies, making it suitable for solving robot path-planning problems. However, Q-learning faces challenges in search and update efficiency. To address these issues, we propose an improved Q-learning (IQL) algorithm. We use an enhanced Ant Colony Optimization (ACO) algorithm to optimize Q-table initialization. We also introduce the UCH mechanism to refine the reward function and overcome the exploration dilemma. The IQL algorithm is extensively tested in three grid environments of different scales. The results validate the accuracy of the method and demonstrate superior path-planning performance compared to traditional approaches. The algorithm reduces the number of trials required for convergence, improves learning efficiency, and enables faster adaptation to environmental changes. It also enhances stability and accuracy by reducing the standard deviation of trials to zero. On grid maps of different sizes, IQL achieves higher expected returns. Compared with the original Q-learning algorithm, IQL improves performance by 12.95\%, 18.28\%, and 7.98\% on 10 $\ast$ 10, 20 $\ast$ 20, and 30 $\ast$ 30 maps, respectively. The proposed algorithm has promising applications in robotics, path planning, intelligent transportation, aerospace, and game development.},
  internal-note = {NOTE(review): year 2026 vs. "2025" in DOI/key -- likely online-first vs. issue year; confirm against publisher page},
}



