@Article{iasc.2023.028126,
  AUTHOR   = {A. Sivaranjani and B. Vinod},
  TITLE    = {Artificial Potential Field Incorporated Deep-Q-Network Algorithm for Mobile Robot Path Prediction},
  JOURNAL  = {Intelligent Automation \& Soft Computing},
  VOLUME   = {35},
  NUMBER   = {1},
  YEAR     = {2023},
  PAGES    = {1135--1150},
  URL      = {http://www.techscience.com/iasc/v35n1/48174},
  ISSN     = {2326-005X},
  ABSTRACT = {Autonomous navigation is a challenging task that requires a mobile robot to travel from its initial position to its destination without collision. Reinforcement Learning methods allow a mobile robot to learn a state-action function suited to its environment: through trial-and-error interaction with its surroundings, the robot discovers an ideal behavior on its own. The Deep Q Network (DQN) algorithm is used on the TurtleBot 3 (TB3) to reach the goal while successfully avoiding obstacles, but it requires a large number of training iterations. This research focuses on predicting a mobile robot's best path using the DQN and Artificial Potential Field (APF) algorithms. First, a DQN for the TB3 Waffle Pi is built and trained to reach the goal. Then the APF shortest-path algorithm is incorporated into the DQN algorithm. The proposed planning approach is compared with the standard DQN method in a virtual environment based on the Robot Operating System (ROS). The simulation results show that the combination of DQN and APF is effective: it yields a better optimal path and takes less time than the conventional DQN algorithm. Compared with DQN, the proposed DQN + APF attains a performance improvement rate of 88% in the number of successful targets and an average time of 0.331 s; in terms of average rewards, the positive goal is attained at 85% and the negative goal at −90%.},
  DOI      = {10.32604/iasc.2023.028126}
}