BibTeX
@inproceedings{cifka-bojar-2018-bleu,
    title = "Are {BLEU} and Meaning Representation in Opposition?",
    author = "C{\'\i}fka, Ond{\v{r}}ej and
      Bojar, Ond{\v{r}}ej",
    editor = "Gurevych, Iryna and
      Miyao, Yusuke",
    booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2018",
    address = "Melbourne, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P18-1126",
    doi = "10.18653/v1/P18-1126",
    pages = "1362--1371",
    abstract = "One of possible ways of obtaining continuous-space sentence representations is by training neural machine translation (NMT) systems. The recent attention mechanism however removes the single point in the neural network from which the source sentence representation can be extracted. We propose several variations of the attentive NMT architecture bringing this meeting point back. Empirical evaluation suggests that the better the translation quality, the worse the learned sentence representations serve in a wide range of classification and similarity tasks.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="cifka-bojar-2018-bleu">
    <titleInfo>
        <title>Are BLEU and Meaning Representation in Opposition?</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Ondřej</namePart>
        <namePart type="family">Cífka</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Ondřej</namePart>
        <namePart type="family">Bojar</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2018-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Iryna</namePart>
            <namePart type="family">Gurevych</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yusuke</namePart>
            <namePart type="family">Miyao</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Melbourne, Australia</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>One of possible ways of obtaining continuous-space sentence representations is by training neural machine translation (NMT) systems. The recent attention mechanism however removes the single point in the neural network from which the source sentence representation can be extracted. We propose several variations of the attentive NMT architecture bringing this meeting point back. Empirical evaluation suggests that the better the translation quality, the worse the learned sentence representations serve in a wide range of classification and similarity tasks.</abstract>
    <identifier type="citekey">cifka-bojar-2018-bleu</identifier>
    <identifier type="doi">10.18653/v1/P18-1126</identifier>
    <location>
        <url>https://aclanthology.org/P18-1126</url>
    </location>
    <part>
        <date>2018-07</date>
        <extent unit="page">
            <start>1362</start>
            <end>1371</end>
        </extent>
    </part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Are BLEU and Meaning Representation in Opposition?
%A Cífka, Ondřej
%A Bojar, Ondřej
%Y Gurevych, Iryna
%Y Miyao, Yusuke
%S Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F cifka-bojar-2018-bleu
%X One of possible ways of obtaining continuous-space sentence representations is by training neural machine translation (NMT) systems. The recent attention mechanism however removes the single point in the neural network from which the source sentence representation can be extracted. We propose several variations of the attentive NMT architecture bringing this meeting point back. Empirical evaluation suggests that the better the translation quality, the worse the learned sentence representations serve in a wide range of classification and similarity tasks.
%R 10.18653/v1/P18-1126
%U https://aclanthology.org/P18-1126
%U https://doi.org/10.18653/v1/P18-1126
%P 1362-1371
Markdown (Informal)
[Are BLEU and Meaning Representation in Opposition?](https://aclanthology.org/P18-1126) (Cífka & Bojar, ACL 2018)
ACL
Ondřej Cífka and Ondřej Bojar. 2018. Are BLEU and Meaning Representation in Opposition?. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1362–1371, Melbourne, Australia. Association for Computational Linguistics.