@inproceedings{nishimura-etal-2018-multi,
  title     = {Multi-Source Neural Machine Translation with Missing Data},
  author    = {Nishimura, Yuta and
               Sudoh, Katsuhito and
               Neubig, Graham and
               Nakamura, Satoshi},
  editor    = {Birch, Alexandra and
               Finch, Andrew and
               Luong, Thang and
               Neubig, Graham and
               Oda, Yusuke},
  booktitle = {Proceedings of the 2nd Workshop on Neural Machine Translation and Generation},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-2711},
  doi       = {10.18653/v1/W18-2711},
  pages     = {92--99},
  abstract  = {Multi-source translation is an approach to exploit multiple inputs (e.g. in two different languages) to increase translation accuracy. In this paper, we examine approaches for multi-source neural machine translation (NMT) using an incomplete multilingual corpus in which some translations are missing. In practice, many multilingual corpora are not complete due to the difficulty to provide translations in all of the relevant languages (for example, in TED talks, most English talks only have subtitles for a small portion of the languages that TED supports). Existing studies on multi-source translation did not explicitly handle such situations. This study focuses on the use of incomplete multilingual corpora in multi-encoder NMT and mixture of NMT experts and examines a very simple implementation where missing source translations are replaced by a special symbol {\textless}NULL{\textgreater}. These methods allow us to use incomplete corpora both at training time and test time. In experiments with real incomplete multilingual corpora of TED Talks, the multi-source NMT with the {\textless}NULL{\textgreater} tokens achieved higher translation accuracies measured by BLEU than those by any one-to-one NMT systems.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nishimura-etal-2018-multi">
<titleInfo>
<title>Multi-Source Neural Machine Translation with Missing Data</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yuta</namePart>
<namePart type="family">Nishimura</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Katsuhito</namePart>
<namePart type="family">Sudoh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Graham</namePart>
<namePart type="family">Neubig</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Satoshi</namePart>
<namePart type="family">Nakamura</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Neural Machine Translation and Generation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexandra</namePart>
<namePart type="family">Birch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andrew</namePart>
<namePart type="family">Finch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thang</namePart>
<namePart type="family">Luong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Graham</namePart>
<namePart type="family">Neubig</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yusuke</namePart>
<namePart type="family">Oda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Multi-source translation is an approach to exploit multiple inputs (e.g. in two different languages) to increase translation accuracy. In this paper, we examine approaches for multi-source neural machine translation (NMT) using an incomplete multilingual corpus in which some translations are missing. In practice, many multilingual corpora are not complete due to the difficulty to provide translations in all of the relevant languages (for example, in TED talks, most English talks only have subtitles for a small portion of the languages that TED supports). Existing studies on multi-source translation did not explicitly handle such situations. This study focuses on the use of incomplete multilingual corpora in multi-encoder NMT and mixture of NMT experts and examines a very simple implementation where missing source translations are replaced by a special symbol &lt;NULL&gt;. These methods allow us to use incomplete corpora both at training time and test time. In experiments with real incomplete multilingual corpora of TED Talks, the multi-source NMT with the &lt;NULL&gt; tokens achieved higher translation accuracies measured by BLEU than those by any one-to-one NMT systems.</abstract>
<identifier type="citekey">nishimura-etal-2018-multi</identifier>
<identifier type="doi">10.18653/v1/W18-2711</identifier>
<location>
<url>https://aclanthology.org/W18-2711</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>92</start>
<end>99</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multi-Source Neural Machine Translation with Missing Data
%A Nishimura, Yuta
%A Sudoh, Katsuhito
%A Neubig, Graham
%A Nakamura, Satoshi
%Y Birch, Alexandra
%Y Finch, Andrew
%Y Luong, Thang
%Y Neubig, Graham
%Y Oda, Yusuke
%S Proceedings of the 2nd Workshop on Neural Machine Translation and Generation
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F nishimura-etal-2018-multi
%X Multi-source translation is an approach to exploit multiple inputs (e.g. in two different languages) to increase translation accuracy. In this paper, we examine approaches for multi-source neural machine translation (NMT) using an incomplete multilingual corpus in which some translations are missing. In practice, many multilingual corpora are not complete due to the difficulty to provide translations in all of the relevant languages (for example, in TED talks, most English talks only have subtitles for a small portion of the languages that TED supports). Existing studies on multi-source translation did not explicitly handle such situations. This study focuses on the use of incomplete multilingual corpora in multi-encoder NMT and mixture of NMT experts and examines a very simple implementation where missing source translations are replaced by a special symbol <NULL>. These methods allow us to use incomplete corpora both at training time and test time. In experiments with real incomplete multilingual corpora of TED Talks, the multi-source NMT with the <NULL> tokens achieved higher translation accuracies measured by BLEU than those by any one-to-one NMT systems.
%R 10.18653/v1/W18-2711
%U https://aclanthology.org/W18-2711
%U https://doi.org/10.18653/v1/W18-2711
%P 92-99
Markdown (Informal)
[Multi-Source Neural Machine Translation with Missing Data](https://aclanthology.org/W18-2711) (Nishimura et al., NGT 2018)
ACL
- Yuta Nishimura, Katsuhito Sudoh, Graham Neubig, and Satoshi Nakamura. 2018. Multi-Source Neural Machine Translation with Missing Data. In Proceedings of the 2nd Workshop on Neural Machine Translation and Generation, pages 92–99, Melbourne, Australia. Association for Computational Linguistics.