BibTeX
@inproceedings{lapshinova-koltunski-etal-2019-analysing,
    title = "Analysing Coreference in Transformer Outputs",
    author = "Lapshinova-Koltunski, Ekaterina and
      Espa{\~n}a-Bonet, Cristina and
      van Genabith, Josef",
    editor = "Popescu-Belis, Andrei and
      Lo{\'a}iciga, Sharid and
      Hardmeier, Christian and
      Xiong, Deyi",
    booktitle = "Proceedings of the Fourth Workshop on Discourse in Machine Translation (DiscoMT 2019)",
    month = nov,
    year = "2019",
    address = "Hong Kong, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-6501",
    doi = "10.18653/v1/D19-6501",
    pages = "1--12",
    abstract = "We analyse coreference phenomena in three neural machine translation systems trained with different data settings with or without access to explicit intra- and cross-sentential anaphoric information. We compare system performance on two different genres: news and TED talks. To do this, we manually annotate (the possibly incorrect) coreference chains in the MT outputs and evaluate the coreference chain translations. We define an error typology that aims to go further than pronoun translation adequacy and includes types such as incorrect word selection or missing words. The features of coreference chains in automatic translations are also compared to those of the source texts and human translations. The analysis shows stronger potential translationese effects in machine translated outputs than in human translations.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="lapshinova-koltunski-etal-2019-analysing">
    <titleInfo>
      <title>Analysing Coreference in Transformer Outputs</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Ekaterina</namePart>
      <namePart type="family">Lapshinova-Koltunski</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Cristina</namePart>
      <namePart type="family">España-Bonet</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Josef</namePart>
      <namePart type="family">van Genabith</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Fourth Workshop on Discourse in Machine Translation (DiscoMT 2019)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Andrei</namePart>
        <namePart type="family">Popescu-Belis</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sharid</namePart>
        <namePart type="family">Loáiciga</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Christian</namePart>
        <namePart type="family">Hardmeier</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Deyi</namePart>
        <namePart type="family">Xiong</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Hong Kong, China</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We analyse coreference phenomena in three neural machine translation systems trained with different data settings with or without access to explicit intra- and cross-sentential anaphoric information. We compare system performance on two different genres: news and TED talks. To do this, we manually annotate (the possibly incorrect) coreference chains in the MT outputs and evaluate the coreference chain translations. We define an error typology that aims to go further than pronoun translation adequacy and includes types such as incorrect word selection or missing words. The features of coreference chains in automatic translations are also compared to those of the source texts and human translations. The analysis shows stronger potential translationese effects in machine translated outputs than in human translations.</abstract>
    <identifier type="citekey">lapshinova-koltunski-etal-2019-analysing</identifier>
    <identifier type="doi">10.18653/v1/D19-6501</identifier>
    <location>
      <url>https://aclanthology.org/D19-6501</url>
    </location>
    <part>
      <date>2019-11</date>
      <extent unit="page">
        <start>1</start>
        <end>12</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Analysing Coreference in Transformer Outputs
%A Lapshinova-Koltunski, Ekaterina
%A España-Bonet, Cristina
%A van Genabith, Josef
%Y Popescu-Belis, Andrei
%Y Loáiciga, Sharid
%Y Hardmeier, Christian
%Y Xiong, Deyi
%S Proceedings of the Fourth Workshop on Discourse in Machine Translation (DiscoMT 2019)
%D 2019
%8 November
%I Association for Computational Linguistics
%C Hong Kong, China
%F lapshinova-koltunski-etal-2019-analysing
%X We analyse coreference phenomena in three neural machine translation systems trained with different data settings with or without access to explicit intra- and cross-sentential anaphoric information. We compare system performance on two different genres: news and TED talks. To do this, we manually annotate (the possibly incorrect) coreference chains in the MT outputs and evaluate the coreference chain translations. We define an error typology that aims to go further than pronoun translation adequacy and includes types such as incorrect word selection or missing words. The features of coreference chains in automatic translations are also compared to those of the source texts and human translations. The analysis shows stronger potential translationese effects in machine translated outputs than in human translations.
%R 10.18653/v1/D19-6501
%U https://aclanthology.org/D19-6501
%U https://doi.org/10.18653/v1/D19-6501
%P 1-12
Markdown (Informal)
[Analysing Coreference in Transformer Outputs](https://aclanthology.org/D19-6501) (Lapshinova-Koltunski et al., DiscoMT 2019)
ACL
Ekaterina Lapshinova-Koltunski, Cristina España-Bonet, and Josef van Genabith. 2019. Analysing Coreference in Transformer Outputs. In Proceedings of the Fourth Workshop on Discourse in Machine Translation (DiscoMT 2019), pages 1–12, Hong Kong, China. Association for Computational Linguistics.