@article{cmc.2022.026784,
  author   = {Guo, Shaozhe and Li, Yong and Chen, Xuyang and Zhang, Youshan},
  title    = {Anchor-free {Siamese} Network Based on Visual Tracking},
  journal  = {Computers, Materials \& Continua},
  volume   = {73},
  number   = {2},
  pages    = {3137--3148},
  year     = {2022},
  issn     = {1546-2226},
  doi      = {10.32604/cmc.2022.026784},
  url      = {http://www.techscience.com/cmc/v73n2/48328},
  abstract = {The Visual tracking problem can usually be solved in two parts. The first part is to extract the feature of the target and get the candidate region. The second part is to realize the classification of the target and the regression of the bounding box. In recent years, Siameses network in visual tracking problem has always been a frontier research hotspot. In this work, it applies two branches namely search area and tracking template area for similar learning to track. Some related researches prove the feasibility of this network structure. According to the characteristics of two branch shared networks in Siamese network, we also propos a new fully convolutional Siamese network to solve the visual tracking problem. Based on the Siamese network structure, the network we designed adopt a new fusion module, which realizes the fusion of multiple feature layers at different depths. We also devise a better target state estimation criterion. The overall structure is simple, efficient and has wide applicability. We extensive experiments on challenging benchmarks including generic object tracking-10k (GOT-10K), online object tracking benckmark2015 (OTB2015) and unmanned air vehicle123 (UAV123), and comparisons with state-of-the-art trackers and the fusion module commonly used in the past, Finally, our network performed better under the same backbone, and achieved good tracking effect, which proved the effectiveness and universality of our designed network and feature fusion method.},
}