@inproceedings{kadar-etal-2018-lessons,
title = "Lessons Learned in Multilingual Grounded Language Learning",
author = "K{\'a}d{\'a}r, {\'A}kos and
Elliott, Desmond and
C{\^o}t{\'e}, Marc-Alexandre and
Chrupa{\l}a, Grzegorz and
Alishahi, Afra",
editor = "Korhonen, Anna and
Titov, Ivan",
booktitle = "Proceedings of the 22nd Conference on Computational Natural Language Learning",
month = oct,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/K18-1039",
doi = "10.18653/v1/K18-1039",
pages = "402--412",
abstract = "Recent work has shown how to learn better visual-semantic embeddings by leveraging image descriptions in more than one language. Here, we investigate in detail which conditions affect the performance of this type of grounded language learning model. We show that multilingual training improves over bilingual training, and that low-resource languages benefit from training with higher-resource languages. We demonstrate that a multilingual model can be trained equally well on either translations or comparable sentence pairs, and that annotating the same set of images in multiple languages enables further improvements via an additional caption-caption ranking objective.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kadar-etal-2018-lessons">
<titleInfo>
<title>Lessons Learned in Multilingual Grounded Language Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ákos</namePart>
<namePart type="family">Kádár</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Desmond</namePart>
<namePart type="family">Elliott</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marc-Alexandre</namePart>
<namePart type="family">Côté</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Grzegorz</namePart>
<namePart type="family">Chrupała</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Afra</namePart>
<namePart type="family">Alishahi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 22nd Conference on Computational Natural Language Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Korhonen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivan</namePart>
<namePart type="family">Titov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent work has shown how to learn better visual-semantic embeddings by leveraging image descriptions in more than one language. Here, we investigate in detail which conditions affect the performance of this type of grounded language learning model. We show that multilingual training improves over bilingual training, and that low-resource languages benefit from training with higher-resource languages. We demonstrate that a multilingual model can be trained equally well on either translations or comparable sentence pairs, and that annotating the same set of images in multiple languages enables further improvements via an additional caption-caption ranking objective.</abstract>
<identifier type="citekey">kadar-etal-2018-lessons</identifier>
<identifier type="doi">10.18653/v1/K18-1039</identifier>
<location>
<url>https://aclanthology.org/K18-1039</url>
</location>
<part>
<date>2018-10</date>
<extent unit="page">
<start>402</start>
<end>412</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Lessons Learned in Multilingual Grounded Language Learning
%A Kádár, Ákos
%A Elliott, Desmond
%A Côté, Marc-Alexandre
%A Chrupała, Grzegorz
%A Alishahi, Afra
%Y Korhonen, Anna
%Y Titov, Ivan
%S Proceedings of the 22nd Conference on Computational Natural Language Learning
%D 2018
%8 October
%I Association for Computational Linguistics
%C Brussels, Belgium
%F kadar-etal-2018-lessons
%X Recent work has shown how to learn better visual-semantic embeddings by leveraging image descriptions in more than one language. Here, we investigate in detail which conditions affect the performance of this type of grounded language learning model. We show that multilingual training improves over bilingual training, and that low-resource languages benefit from training with higher-resource languages. We demonstrate that a multilingual model can be trained equally well on either translations or comparable sentence pairs, and that annotating the same set of images in multiple languages enables further improvements via an additional caption-caption ranking objective.
%R 10.18653/v1/K18-1039
%U https://aclanthology.org/K18-1039
%U https://doi.org/10.18653/v1/K18-1039
%P 402-412
Markdown (Informal)
[Lessons Learned in Multilingual Grounded Language Learning](https://aclanthology.org/K18-1039) (Kádár et al., CoNLL 2018)
ACL
- Ákos Kádár, Desmond Elliott, Marc-Alexandre Côté, Grzegorz Chrupała, and Afra Alishahi. 2018. Lessons Learned in Multilingual Grounded Language Learning. In Proceedings of the 22nd Conference on Computational Natural Language Learning, pages 402–412, Brussels, Belgium. Association for Computational Linguistics.