@inproceedings{1ce8b48b1bc2443f834bda0b249c26f2,
  title     = {{SPFusionNet}: Sketch Segmentation Using Multi-Modal Data Fusion},
  abstract  = {The sketch segmentation problem remains largely unsolved because conventional methods are greatly challenged by the highly abstract appearances of freehand sketches and their numerous shape variations. In this work, we tackle such challenges by exploiting different modes of sketch data in a unified framework. Specifically, we propose a deep neural network SPFusionNet to capture the characteristic of sketch by fusing from its image and point set modes. The image modal component SketchNet learns hierarchically abstract robust features and utilizes multi-level representations to produce pixel-wise feature maps, while the point set-modal component SPointNet captures local and global contexts of the sampled point set to produce point-wise feature maps. Then our framework aggregates these feature maps by a fusion network component to generate the sketch segmentation result. The extensive experimental evaluation and comparison with peer methods on our large SketchSeg dataset verify the effectiveness of the proposed framework.},
  keywords  = {Deep neural network, Multi-modal fusion, Sketch segmentation},
  author    = {Wang, Fei and Lin, Shujin and Wu, Hefeng and Li, Hanhui and Wang, Ruomei and Luo, Xiaonan and He, Xiangjian},
  note      = {Publisher Copyright: {\textcopyright} 2019 IEEE.; 2019 IEEE International Conference on Multimedia and Expo, ICME 2019 ; Conference date: 08-07-2019 Through 12-07-2019},
  year      = {2019},
  month     = jul,
  doi       = {10.1109/ICME.2019.00285},
  language  = {English},
  series    = {Proceedings - {IEEE} International Conference on Multimedia and Expo},
  publisher = {IEEE Computer Society},
  pages     = {1654--1659},
  booktitle = {Proceedings - 2019 {IEEE} International Conference on Multimedia and Expo, {ICME} 2019},
  address   = {United States},
}