
@article{cmc.2023.044735,
  author   = {Dong, Chao and Jiang, Xiangkui},
  title    = {An Intelligent Detection Method for Optical Remote Sensing Images Based on Improved {YOLOv7}},
  journal  = {Computers, Materials \& Continua},
  year     = {2023},
  volume   = {77},
  number   = {3},
  pages    = {3015--3036},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2023.044735},
  url      = {http://www.techscience.com/cmc/v77n3/55058},
  abstract = {To address the issue of imbalanced detection performance and detection speed in current mainstream object detection algorithms for optical remote sensing images, this paper proposes a multi-scale object detection model for remote sensing images on complex backgrounds, called DI-YOLO, based on You Only Look Once v7-tiny (YOLOv7-tiny). Firstly, to enhance the model's ability to capture irregular-shaped objects and deformation features, as well as to extract high-level semantic information, deformable convolutions are used to replace standard convolutions in the original model. Secondly, a Content Coordination Attention Feature Pyramid Network (CCA-FPN) structure is designed to replace the Neck part of the original model, which can further perceive relationships between different pixels, reduce feature loss in remote sensing images, and improve the overall model's ability to detect multi-scale objects. Thirdly, an Implicitly Efficient Decoupled Head (IEDH) is proposed to increase the model's flexibility, making it more adaptable to complex detection tasks in various scenarios. Finally, the Smoothed Intersection over Union (SIoU) loss function replaces the Complete Intersection over Union (CIoU) loss function in the original model, resulting in more accurate prediction of bounding boxes and continuous model optimization. Experimental results on the High-Resolution Remote Sensing Detection (HRRSD) dataset demonstrate that the proposed DI-YOLO model outperforms mainstream target detection algorithms in terms of mean Average Precision (mAP) for optical remote sensing image detection. Furthermore, it achieves Frames Per Second (FPS) of 138.9, meeting fast and accurate detection requirements.},
}



