@inproceedings{arivazhagan-etal-2020-translation,
title = "Re-translation versus Streaming for Simultaneous Translation",
author = "Arivazhagan, Naveen and
Cherry, Colin and
Macherey, Wolfgang and
Foster, George",
editor = {Federico, Marcello and
Waibel, Alex and
Knight, Kevin and
Nakamura, Satoshi and
Ney, Hermann and
Niehues, Jan and
St{\"u}ker, Sebastian and
Wu, Dekai and
Mariani, Joseph and
Yvon, Francois},
booktitle = "Proceedings of the 17th International Conference on Spoken Language Translation",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.iwslt-1.27/",
doi = "10.18653/v1/2020.iwslt-1.27",
pages = "220--227",
abstract = "There has been great progress in improving streaming machine translation, a simultaneous paradigm where the system appends to a growing hypothesis as more source content becomes available. We study a related problem in which revisions to the hypothesis beyond strictly appending words are permitted. This is suitable for applications such as live captioning an audio feed. In this setting, we compare custom streaming approaches to re-translation, a straightforward strategy where each new source token triggers a distinct translation from scratch. We find re-translation to be as good or better than state-of-the-art streaming systems, even when operating under constraints that allow very few revisions. We attribute much of this success to a previously proposed data-augmentation technique that adds prefix-pairs to the training data, which alongside wait-k inference forms a strong baseline for streaming translation. We also highlight re-translation's ability to wrap arbitrarily powerful MT systems with an experiment showing large improvements from an upgrade to its base model."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="arivazhagan-etal-2020-translation">
<titleInfo>
<title>Re-translation versus Streaming for Simultaneous Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Naveen</namePart>
<namePart type="family">Arivazhagan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Colin</namePart>
<namePart type="family">Cherry</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wolfgang</namePart>
<namePart type="family">Macherey</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">George</namePart>
<namePart type="family">Foster</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 17th International Conference on Spoken Language Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marcello</namePart>
<namePart type="family">Federico</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alex</namePart>
<namePart type="family">Waibel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Knight</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Satoshi</namePart>
<namePart type="family">Nakamura</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hermann</namePart>
<namePart type="family">Ney</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jan</namePart>
<namePart type="family">Niehues</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sebastian</namePart>
<namePart type="family">Stüker</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dekai</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joseph</namePart>
<namePart type="family">Mariani</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Francois</namePart>
<namePart type="family">Yvon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>There has been great progress in improving streaming machine translation, a simultaneous paradigm where the system appends to a growing hypothesis as more source content becomes available. We study a related problem in which revisions to the hypothesis beyond strictly appending words are permitted. This is suitable for applications such as live captioning an audio feed. In this setting, we compare custom streaming approaches to re-translation, a straightforward strategy where each new source token triggers a distinct translation from scratch. We find re-translation to be as good or better than state-of-the-art streaming systems, even when operating under constraints that allow very few revisions. We attribute much of this success to a previously proposed data-augmentation technique that adds prefix-pairs to the training data, which alongside wait-k inference forms a strong baseline for streaming translation. We also highlight re-translation’s ability to wrap arbitrarily powerful MT systems with an experiment showing large improvements from an upgrade to its base model.</abstract>
<identifier type="citekey">arivazhagan-etal-2020-translation</identifier>
<identifier type="doi">10.18653/v1/2020.iwslt-1.27</identifier>
<location>
<url>https://aclanthology.org/2020.iwslt-1.27/</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>220</start>
<end>227</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Re-translation versus Streaming for Simultaneous Translation
%A Arivazhagan, Naveen
%A Cherry, Colin
%A Macherey, Wolfgang
%A Foster, George
%Y Federico, Marcello
%Y Waibel, Alex
%Y Knight, Kevin
%Y Nakamura, Satoshi
%Y Ney, Hermann
%Y Niehues, Jan
%Y Stüker, Sebastian
%Y Wu, Dekai
%Y Mariani, Joseph
%Y Yvon, Francois
%S Proceedings of the 17th International Conference on Spoken Language Translation
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F arivazhagan-etal-2020-translation
%X There has been great progress in improving streaming machine translation, a simultaneous paradigm where the system appends to a growing hypothesis as more source content becomes available. We study a related problem in which revisions to the hypothesis beyond strictly appending words are permitted. This is suitable for applications such as live captioning an audio feed. In this setting, we compare custom streaming approaches to re-translation, a straightforward strategy where each new source token triggers a distinct translation from scratch. We find re-translation to be as good or better than state-of-the-art streaming systems, even when operating under constraints that allow very few revisions. We attribute much of this success to a previously proposed data-augmentation technique that adds prefix-pairs to the training data, which alongside wait-k inference forms a strong baseline for streaming translation. We also highlight re-translation’s ability to wrap arbitrarily powerful MT systems with an experiment showing large improvements from an upgrade to its base model.
%R 10.18653/v1/2020.iwslt-1.27
%U https://aclanthology.org/2020.iwslt-1.27/
%U https://doi.org/10.18653/v1/2020.iwslt-1.27
%P 220-227
Markdown (Informal)
[Re-translation versus Streaming for Simultaneous Translation](https://aclanthology.org/2020.iwslt-1.27/) (Arivazhagan et al., IWSLT 2020)
ACL