BibTeX
@inproceedings{tiedemann-scherrer-2019-measuring,
title = "Measuring Semantic Abstraction of Multilingual {NMT} with Paraphrase Recognition and Generation Tasks",
author = {Tiedemann, J{\"o}rg and
Scherrer, Yves},
editor = "Rogers, Anna and
Drozd, Aleksandr and
Rumshisky, Anna and
Goldberg, Yoav",
booktitle = "Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for {NLP}",
month = jun,
year = "2019",
address = "Minneapolis, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-2005",
doi = "10.18653/v1/W19-2005",
pages = "35--42",
abstract = "In this paper, we investigate whether multilingual neural translation models learn stronger semantic abstractions of sentences than bilingual ones. We test this hypothesis by measuring the perplexity of such models when applied to paraphrases of the source language. The intuition is that an encoder produces better representations if a decoder is capable of recognizing synonymous sentences in the same language even though the model is never trained for that task. In our setup, we add 16 different auxiliary languages to a bidirectional bilingual baseline model (English-French) and test it with in-domain and out-of-domain paraphrases in English. The results show that the perplexity is significantly reduced in each of the cases, indicating that meaning can be grounded in translation. This is further supported by a study on paraphrase generation that we also include at the end of the paper.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tiedemann-scherrer-2019-measuring">
<titleInfo>
<title>Measuring Semantic Abstraction of Multilingual NMT with Paraphrase Recognition and Generation Tasks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jörg</namePart>
<namePart type="family">Tiedemann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yves</namePart>
<namePart type="family">Scherrer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rogers</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aleksandr</namePart>
<namePart type="family">Drozd</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Rumshisky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yoav</namePart>
<namePart type="family">Goldberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Minneapolis, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper, we investigate whether multilingual neural translation models learn stronger semantic abstractions of sentences than bilingual ones. We test this hypothesis by measuring the perplexity of such models when applied to paraphrases of the source language. The intuition is that an encoder produces better representations if a decoder is capable of recognizing synonymous sentences in the same language even though the model is never trained for that task. In our setup, we add 16 different auxiliary languages to a bidirectional bilingual baseline model (English-French) and test it with in-domain and out-of-domain paraphrases in English. The results show that the perplexity is significantly reduced in each of the cases, indicating that meaning can be grounded in translation. This is further supported by a study on paraphrase generation that we also include at the end of the paper.</abstract>
<identifier type="citekey">tiedemann-scherrer-2019-measuring</identifier>
<identifier type="doi">10.18653/v1/W19-2005</identifier>
<location>
<url>https://aclanthology.org/W19-2005</url>
</location>
<part>
<date>2019-06</date>
<extent unit="page">
<start>35</start>
<end>42</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Measuring Semantic Abstraction of Multilingual NMT with Paraphrase Recognition and Generation Tasks
%A Tiedemann, Jörg
%A Scherrer, Yves
%Y Rogers, Anna
%Y Drozd, Aleksandr
%Y Rumshisky, Anna
%Y Goldberg, Yoav
%S Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP
%D 2019
%8 June
%I Association for Computational Linguistics
%C Minneapolis, USA
%F tiedemann-scherrer-2019-measuring
%X In this paper, we investigate whether multilingual neural translation models learn stronger semantic abstractions of sentences than bilingual ones. We test this hypothesis by measuring the perplexity of such models when applied to paraphrases of the source language. The intuition is that an encoder produces better representations if a decoder is capable of recognizing synonymous sentences in the same language even though the model is never trained for that task. In our setup, we add 16 different auxiliary languages to a bidirectional bilingual baseline model (English-French) and test it with in-domain and out-of-domain paraphrases in English. The results show that the perplexity is significantly reduced in each of the cases, indicating that meaning can be grounded in translation. This is further supported by a study on paraphrase generation that we also include at the end of the paper.
%R 10.18653/v1/W19-2005
%U https://aclanthology.org/W19-2005
%U https://doi.org/10.18653/v1/W19-2005
%P 35-42
Markdown (Informal)
[Measuring Semantic Abstraction of Multilingual NMT with Paraphrase Recognition and Generation Tasks](https://aclanthology.org/W19-2005) (Tiedemann & Scherrer, RepEval 2019)
ACL
Jörg Tiedemann and Yves Scherrer. 2019. Measuring Semantic Abstraction of Multilingual NMT with Paraphrase Recognition and Generation Tasks. In Proceedings of the 3rd Workshop on Evaluating Vector Space Representations for NLP, pages 35–42, Minneapolis, USA. Association for Computational Linguistics.