@Article{cmc.2022.026881,
  AUTHOR = {Ni Ruiwen and Mu Ye and Li Ji and Zhang Tong and Luo Tianye and Feng Ruilong and Gong He and Hu Tianli and Sun Yu and Guo Ying and Li Shijun and Thobela Louis Tyasi},
  TITLE = {Segmentation of Remote Sensing Images Based on U-Net Multi-Task Learning},
  JOURNAL = {Computers, Materials \& Continua},
  VOLUME = {73},
  YEAR = {2022},
  NUMBER = {2},
  PAGES = {3263--3274},
  URL = {http://www.techscience.com/cmc/v73n2/48329},
  ISSN = {1546-2226},
  ABSTRACT = {In order to accurately segment architectural features in high-resolution remote sensing images, a semantic segmentation method based on U-net multi-task learning is proposed. First, a boundary distance map was generated from the ground truth map of the buildings in the remote sensing image. The remote sensing image and its truth map were used as the input to the U-net network, followed by the addition of the building ground prediction layer at the end of the U-net network. Based on the ResNet network, a multi-task network with the boundary distance prediction layer was built. Experiments on the ISPRS aerial remote sensing image building and feature annotation data set show that, compared with the fully convolutional network combined with the multi-layer perceptron method, the intersection ratios of the VGG16 network, VGG16 + boundary prediction, ResNet50 and the method in this paper were increased by 5.15%, 6.946%, 6.41% and 7.86%, respectively. The accuracy of the networks was increased to 94.71%, 95.39%, 95.30% and 96.10% respectively, resulting in high-precision extraction of building features.},
  DOI = {10.32604/cmc.2022.026881}
}