@inproceedings{zou-etal-2022-investigating,
    title = "Investigating the Impact of Different Pivot Languages on Translation Quality",
    author = "Zou, Longhui  and
      Saeedi, Ali  and
      Carl, Michael",
    editor = "Carl, Michael  and
      Yamada, Masaru  and
      Zou, Longhui",
    booktitle = "Proceedings of the 15th biennial conference of the Association for Machine Translation in the Americas (Workshop 1: Empirical Translation Process Research)",
    month = sep,
    year = "2022",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2022.amta-wetpr.3",
    pages = "15--28",
    abstract = "Translating via an intermediate pivot language is a common practice, but the impact of the pivot language on the quality of the final translation has not often been investigated. In order to compare the effect of different pivots, we back-translate 41 English source segments via various intermediate channels (Arabic, Chinese and monolingual paraphrasing) into English. We compare the 912 English back-translations of the 41 original English segments using manual evaluation, as well as COMET and various incarnations of BLEU. We compare human from-scratch back-translations with MT back-translations and monolingual paraphrasing. A variation of BLEU (Cum-2) seems to better correlate with our manual evaluation than COMET and the conventional BLEU Cum-4, but a fine-grained qualitative analysis reveals that differences between different pivot languages (Arabic and Chinese) are not captured by the automatized TQA measures.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zou-etal-2022-investigating">
<titleInfo>
<title>Investigating the Impact of Different Pivot Languages on Translation Quality</title>
</titleInfo>
<name type="personal">
<namePart type="given">Longhui</namePart>
<namePart type="family">Zou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ali</namePart>
<namePart type="family">Saeedi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Carl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 15th biennial conference of the Association for Machine Translation in the Americas (Workshop 1: Empirical Translation Process Research)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Carl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masaru</namePart>
<namePart type="family">Yamada</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Longhui</namePart>
<namePart type="family">Zou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Machine Translation in the Americas</publisher>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Translating via an intermediate pivot language is a common practice, but the impact of the pivot language on the quality of the final translation has not often been investigated. In order to compare the effect of different pivots, we back-translate 41 English source segments via various intermediate channels (Arabic, Chinese and monolingual paraphrasing) into English. We compare the 912 English back-translations of the 41 original English segments using manual evaluation, as well as COMET and various incarnations of BLEU. We compare human from-scratch back-translations with MT back-translations and monolingual paraphrasing. A variation of BLEU (Cum-2) seems to better correlate with our manual evaluation than COMET and the conventional BLEU Cum-4, but a fine-grained qualitative analysis reveals that differences between different pivot languages (Arabic and Chinese) are not captured by the automatized TQA measures.</abstract>
<identifier type="citekey">zou-etal-2022-investigating</identifier>
<location>
<url>https://aclanthology.org/2022.amta-wetpr.3</url>
</location>
<part>
<date>2022-09</date>
<extent unit="page">
<start>15</start>
<end>28</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Investigating the Impact of Different Pivot Languages on Translation Quality
%A Zou, Longhui
%A Saeedi, Ali
%A Carl, Michael
%Y Carl, Michael
%Y Yamada, Masaru
%Y Zou, Longhui
%S Proceedings of the 15th biennial conference of the Association for Machine Translation in the Americas (Workshop 1: Empirical Translation Process Research)
%D 2022
%8 September
%I Association for Machine Translation in the Americas
%F zou-etal-2022-investigating
%X Translating via an intermediate pivot language is a common practice, but the impact of the pivot language on the quality of the final translation has not often been investigated. In order to compare the effect of different pivots, we back-translate 41 English source segments via various intermediate channels (Arabic, Chinese and monolingual paraphrasing) into English. We compare the 912 English back-translations of the 41 original English segments using manual evaluation, as well as COMET and various incarnations of BLEU. We compare human from-scratch back-translations with MT back-translations and monolingual paraphrasing. A variation of BLEU (Cum-2) seems to better correlate with our manual evaluation than COMET and the conventional BLEU Cum-4, but a fine-grained qualitative analysis reveals that differences between different pivot languages (Arabic and Chinese) are not captured by the automatized TQA measures.
%U https://aclanthology.org/2022.amta-wetpr.3
%P 15-28
Markdown (Informal)
[Investigating the Impact of Different Pivot Languages on Translation Quality](https://aclanthology.org/2022.amta-wetpr.3) (Zou et al., AMTA 2022)
ACL