@inproceedings{fukui-etal-2017-spectral,
    title     = {Spectral Graph-Based Method of Multimodal Word Embedding},
    author    = {Fukui, Kazuki and
                 Oshikiri, Takamasa and
                 Shimodaira, Hidetoshi},
    editor    = {Riedl, Martin and
                 Somasundaran, Swapna and
                 Glava{\v{s}}, Goran and
                 Hovy, Eduard},
    booktitle = {Proceedings of {TextGraphs-11}: the Workshop on Graph-based Methods for Natural Language Processing},
    month     = aug,
    year      = {2017},
    address   = {Vancouver, Canada},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/W17-2405},
    doi       = {10.18653/v1/W17-2405},
    pages     = {39--44},
    abstract  = {In this paper, we propose a novel method for multimodal word embedding, which exploit a generalized framework of multi-view spectral graph embedding to take into account visual appearances or scenes denoted by words in a corpus. We evaluated our method through word similarity tasks and a concept-to-image search task, having found that it provides word representations that reflect visual information, while somewhat trading-off the performance on the word similarity tasks. Moreover, we demonstrate that our method captures multimodal linguistic regularities, which enable recovering relational similarities between words and images by vector arithmetics.},
}
<?xml version="1.0" encoding="UTF-8"?>
<!-- MODS (Metadata Object Description Schema) record for the same paper as the
     BibTeX entry above; part of a multi-format citation export. Carries the
     identical bibliographic data: authors, editors, host proceedings,
     publisher/place, DOI, URL, pages 39-44, and the verbatim abstract. -->
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="fukui-etal-2017-spectral">
<titleInfo>
<title>Spectral Graph-Based Method of Multimodal Word Embedding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kazuki</namePart>
<namePart type="family">Fukui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Takamasa</namePart>
<namePart type="family">Oshikiri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hidetoshi</namePart>
<namePart type="family">Shimodaira</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of TextGraphs-11: the Workshop on Graph-based Methods for Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Martin</namePart>
<namePart type="family">Riedl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Swapna</namePart>
<namePart type="family">Somasundaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Goran</namePart>
<namePart type="family">Glavaš</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eduard</namePart>
<namePart type="family">Hovy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vancouver, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper, we propose a novel method for multimodal word embedding, which exploit a generalized framework of multi-view spectral graph embedding to take into account visual appearances or scenes denoted by words in a corpus. We evaluated our method through word similarity tasks and a concept-to-image search task, having found that it provides word representations that reflect visual information, while somewhat trading-off the performance on the word similarity tasks. Moreover, we demonstrate that our method captures multimodal linguistic regularities, which enable recovering relational similarities between words and images by vector arithmetics.</abstract>
<identifier type="citekey">fukui-etal-2017-spectral</identifier>
<identifier type="doi">10.18653/v1/W17-2405</identifier>
<location>
<url>https://aclanthology.org/W17-2405</url>
</location>
<part>
<date>2017-08</date>
<extent unit="page">
<start>39</start>
<end>44</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Spectral Graph-Based Method of Multimodal Word Embedding
%A Fukui, Kazuki
%A Oshikiri, Takamasa
%A Shimodaira, Hidetoshi
%Y Riedl, Martin
%Y Somasundaran, Swapna
%Y Glavaš, Goran
%Y Hovy, Eduard
%S Proceedings of TextGraphs-11: the Workshop on Graph-based Methods for Natural Language Processing
%D 2017
%8 August
%I Association for Computational Linguistics
%C Vancouver, Canada
%F fukui-etal-2017-spectral
%X In this paper, we propose a novel method for multimodal word embedding, which exploit a generalized framework of multi-view spectral graph embedding to take into account visual appearances or scenes denoted by words in a corpus. We evaluated our method through word similarity tasks and a concept-to-image search task, having found that it provides word representations that reflect visual information, while somewhat trading-off the performance on the word similarity tasks. Moreover, we demonstrate that our method captures multimodal linguistic regularities, which enable recovering relational similarities between words and images by vector arithmetics.
%R 10.18653/v1/W17-2405
%U https://aclanthology.org/W17-2405
%U https://doi.org/10.18653/v1/W17-2405
%P 39-44
Markdown (Informal)
[Spectral Graph-Based Method of Multimodal Word Embedding](https://aclanthology.org/W17-2405) (Fukui et al., TextGraphs 2017)
ACL
- Kazuki Fukui, Takamasa Oshikiri, and Hidetoshi Shimodaira. 2017. Spectral Graph-Based Method of Multimodal Word Embedding. In Proceedings of TextGraphs-11: the Workshop on Graph-based Methods for Natural Language Processing, pages 39–44, Vancouver, Canada. Association for Computational Linguistics.