@inproceedings{chen-etal-2020-facebook,
    title = "{F}acebook {AI}{'}s {WMT}20 News Translation Task Submission",
    author = "Chen, Peng-Jen and
      Lee, Ann and
      Wang, Changhan and
      Goyal, Naman and
      Fan, Angela and
      Williamson, Mary and
      Gu, Jiatao",
    booktitle = "Proceedings of the Fifth Conference on Machine Translation",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.wmt-1.8",
    pages = "113--125",
    abstract = "This paper describes Facebook AI{'}s submission to WMT20 shared news translation task. We focus on the low resource setting and participate in two language pairs, Tamil {\textless}-{\textgreater} English and Inuktitut {\textless}-{\textgreater} English, where there are limited out-of-domain bitext and monolingual data. We approach the low resource problem using two main strategies, leveraging all available data and adapting the system to the target news domain. We explore techniques that leverage bitext and monolingual data from all languages, such as self-supervised model pretraining, multilingual models, data augmentation, and reranking. To better adapt the translation system to the test domain, we explore dataset tagging and fine-tuning on in-domain data. We observe that different techniques provide varied improvements based on the available data of the language pair. Based on the finding, we integrate these techniques into one training pipeline. For En-{\textgreater}Ta, we explore an unconstrained setup with additional Tamil bitext and monolingual data and show that further improvement can be obtained. On the test set, our best submitted systems achieve 21.5 and 13.7 BLEU for Ta-{\textgreater}En and En-{\textgreater}Ta respectively, and 27.9 and 13.0 for Iu-{\textgreater}En and En-{\textgreater}Iu respectively.",
}
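The BibTeX record above can be dropped into a .bib file and cited with \cite{chen-etal-2020-facebook}. For scripted access to individual fields, here is a minimal Python sketch; the file name chen-etal-2020-facebook.bib is an assumption, and the regex only handles Anthology-style quoted fields, not general BibTeX:

```python
import re

def bibtex_fields(entry: str) -> dict:
    """Naively pull key = "value" pairs out of a single Anthology-style
    BibTeX entry. Quoted values may wrap across lines; unquoted values
    such as month = nov are skipped. Not a general BibTeX parser."""
    fields = {}
    for key, value in re.findall(r'(\w+)\s*=\s*"([^"]*)"', entry):
        fields[key] = " ".join(value.split())  # collapse wrapped lines
    return fields

# Assumed file name for illustration.
entry = open("chen-etal-2020-facebook.bib", encoding="utf-8").read()
fields = bibtex_fields(entry)
print(fields["title"])  # {F}acebook {AI}{'}s {WMT}20 News Translation Task Submission
print(fields["pages"])  # 113--125
```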
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="chen-etal-2020-facebook">
    <titleInfo>
      <title>Facebook AI’s WMT20 News Translation Task Submission</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Peng-Jen</namePart>
      <namePart type="family">Chen</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ann</namePart>
      <namePart type="family">Lee</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Changhan</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Naman</namePart>
      <namePart type="family">Goyal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Angela</namePart>
      <namePart type="family">Fan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Mary</namePart>
      <namePart type="family">Williamson</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jiatao</namePart>
      <namePart type="family">Gu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Fifth Conference on Machine Translation</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper describes Facebook AI’s submission to WMT20 shared news translation task. We focus on the low resource setting and participate in two language pairs, Tamil &lt;-&gt; English and Inuktitut &lt;-&gt; English, where there are limited out-of-domain bitext and monolingual data. We approach the low resource problem using two main strategies, leveraging all available data and adapting the system to the target news domain. We explore techniques that leverage bitext and monolingual data from all languages, such as self-supervised model pretraining, multilingual models, data augmentation, and reranking. To better adapt the translation system to the test domain, we explore dataset tagging and fine-tuning on in-domain data. We observe that different techniques provide varied improvements based on the available data of the language pair. Based on the finding, we integrate these techniques into one training pipeline. For En-&gt;Ta, we explore an unconstrained setup with additional Tamil bitext and monolingual data and show that further improvement can be obtained. On the test set, our best submitted systems achieve 21.5 and 13.7 BLEU for Ta-&gt;En and En-&gt;Ta respectively, and 27.9 and 13.0 for Iu-&gt;En and En-&gt;Iu respectively.</abstract>
    <identifier type="citekey">chen-etal-2020-facebook</identifier>
    <location>
      <url>https://aclanthology.org/2020.wmt-1.8</url>
    </location>
    <part>
      <date>2020-11</date>
      <extent unit="page">
        <start>113</start>
        <end>125</end>
      </extent>
    </part>
  </mods>
</modsCollection>
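The same metadata can be read back out of the MODS record with the Python standard library's ElementTree, as long as the MODS namespace is declared. A sketch, assuming the XML above is saved as chen-etal-2020-facebook.xml:

```python
import xml.etree.ElementTree as ET

# Every MODS element lives in this namespace; ElementTree needs it spelled out.
NS = {"m": "http://www.loc.gov/mods/v3"}

root = ET.parse("chen-etal-2020-facebook.xml").getroot()  # assumed file name
record = root.find("m:mods", NS)

title = record.findtext("m:titleInfo/m:title", namespaces=NS)
# namePart order in this record is given-name first, then family name.
authors = [
    " ".join(part.text for part in name.findall("m:namePart", NS))
    for name in record.findall("m:name[@type='personal']", NS)
]
pages = (
    record.findtext("m:part/m:extent/m:start", namespaces=NS),
    record.findtext("m:part/m:extent/m:end", namespaces=NS),
)
print(title)    # Facebook AI's WMT20 News Translation Task Submission
print(authors)  # ['Peng-Jen Chen', 'Ann Lee', ..., 'Jiatao Gu']
print(pages)    # ('113', '125')
```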
%0 Conference Proceedings
%T Facebook AI’s WMT20 News Translation Task Submission
%A Chen, Peng-Jen
%A Lee, Ann
%A Wang, Changhan
%A Goyal, Naman
%A Fan, Angela
%A Williamson, Mary
%A Gu, Jiatao
%S Proceedings of the Fifth Conference on Machine Translation
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F chen-etal-2020-facebook
%X This paper describes Facebook AI’s submission to WMT20 shared news translation task. We focus on the low resource setting and participate in two language pairs, Tamil <-> English and Inuktitut <-> English, where there are limited out-of-domain bitext and monolingual data. We approach the low resource problem using two main strategies, leveraging all available data and adapting the system to the target news domain. We explore techniques that leverage bitext and monolingual data from all languages, such as self-supervised model pretraining, multilingual models, data augmentation, and reranking. To better adapt the translation system to the test domain, we explore dataset tagging and fine-tuning on in-domain data. We observe that different techniques provide varied improvements based on the available data of the language pair. Based on the finding, we integrate these techniques into one training pipeline. For En->Ta, we explore an unconstrained setup with additional Tamil bitext and monolingual data and show that further improvement can be obtained. On the test set, our best submitted systems achieve 21.5 and 13.7 BLEU for Ta->En and En->Ta respectively, and 27.9 and 13.0 for Iu->En and En->Iu respectively.
%U https://aclanthology.org/2020.wmt-1.8
%P 113-125
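The EndNote export above is in the line-oriented refer format: a percent sign, a one-character tag, a space, then the value, with repeatable tags such as %A listed once per author. A small parser sketch (the .enw file name is an assumption):

```python
def parse_refer(text: str) -> dict:
    """Group %-tagged (refer/EndNote) lines by their one-character tag.
    Repeatable tags such as %A (author) accumulate in file order."""
    record = {}
    for line in text.splitlines():
        if line.startswith("%") and len(line) >= 3:
            tag, value = line[1], line[3:].strip()
            record.setdefault(tag, []).append(value)
    return record

# Assumed file name for illustration.
record = parse_refer(open("chen-etal-2020-facebook.enw", encoding="utf-8").read())
print(record["T"][0])    # Facebook AI's WMT20 News Translation Task Submission
print(len(record["A"]))  # 7
```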
Markdown (Informal):
[Facebook AI’s WMT20 News Translation Task Submission](https://aclanthology.org/2020.wmt-1.8) (Chen et al., WMT 2020)

ACL:
Peng-Jen Chen, Ann Lee, Changhan Wang, Naman Goyal, Angela Fan, Mary Williamson, and Jiatao Gu. 2020. Facebook AI’s WMT20 News Translation Task Submission. In Proceedings of the Fifth Conference on Machine Translation, pages 113–125, Online. Association for Computational Linguistics.
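As a sanity check, the ACL-style reference can be rebuilt from the parsed EndNote record. This toy formatter assumes the parse_refer sketch above has already produced record, and it hard-codes the inproceedings field layout:

```python
def acl_style(rec: dict) -> str:
    """Render a parsed inproceedings record in ACL's informal reference style."""
    # Flip "Family, Given" into "Given Family" for each author.
    names = [" ".join(reversed(a.split(", "))) for a in rec["A"]]
    authors = ", ".join(names[:-1]) + ", and " + names[-1]
    return (f"{authors}. {rec['D'][0]}. {rec['T'][0]}. In {rec['S'][0]}, "
            f"pages {rec['P'][0]}, {rec['C'][0]}. {rec['I'][0]}.")

print(acl_style(record))
# Peng-Jen Chen, Ann Lee, Changhan Wang, Naman Goyal, Angela Fan, Mary
# Williamson, and Jiatao Gu. 2020. Facebook AI's WMT20 News Translation Task
# Submission. In Proceedings of the Fifth Conference on Machine Translation,
# pages 113-125, Online. Association for Computational Linguistics.
```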