
@article{cmc.2020.07451,
  author   = {Li, Yugang and Wang, Yongbin and Chen, Zhe and Zhu, Yuting},
  title    = {Visual Relationship Detection with Contextual Information},
  journal  = {Computers, Materials \& Continua},
  year     = {2020},
  volume   = {63},
  number   = {3},
  pages    = {1575--1589},
  doi      = {10.32604/cmc.2020.07451},
  issn     = {1546-2226},
  url      = {http://www.techscience.com/cmc/v63n3/38894},
  abstract = {Understanding an image goes beyond recognizing and locating the objects in it, 
the relationships between objects also very important in image understanding. Most 
previous methods have focused on recognizing local predictions of the relationships. But 
real-world image relationships often determined by the surrounding objects and other 
contextual information. In this work, we employ this insight to propose a novel
framework to deal with the problem of visual relationship detection. The core of the 
framework is a relationship inference network, which is a recurrent structure designed for 
combining the global contextual information of the object to infer the relationship of the 
image. Experimental results on Stanford VRD and Visual Genome demonstrate that the
proposed method achieves a good performance both in efficiency and accuracy. Finally, 
we demonstrate the value of visual relationship on two computer vision tasks: image 
retrieval and scene graph generation.},
}



