@inproceedings{fukui-oshikiri-shimodaira:2017:TextGraphs-11,
  author    = {Fukui, Kazuki and Oshikiri, Takamasa and Shimodaira, Hidetoshi},
  title     = {Spectral Graph-Based Method of Multimodal Word Embedding},
  booktitle = {Proceedings of {TextGraphs-11}: the Workshop on Graph-based Methods for Natural Language Processing},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {39--44},
  abstract  = {In this paper, we propose a novel method for multimodal word embedding, which
               exploit a generalized framework of multi-view spectral graph embedding to take
               into account visual appearances or scenes denoted by words in a corpus.
               We evaluated our method through word similarity tasks and
               a concept-to-image search task, having found that it provides word
               representations that reflect visual information, while somewhat trading-off the
               performance on the word similarity tasks. Moreover, we demonstrate that our
               method captures multimodal linguistic regularities, which enable recovering
               relational similarities between words and images by vector arithmetics.},
  url       = {http://www.aclweb.org/anthology/W17-2405},
}

