@inproceedings{tokarchuk-etal-2021-integrated,
title = "Integrated Training for Sequence-to-Sequence Models Using Non-Autoregressive Transformer",
author = "Tokarchuk, Evgeniia and
Rosendahl, Jan and
Wang, Weiyue and
Petrushkov, Pavel and
Lancewicki, Tomer and
Khadivi, Shahram and
Ney, Hermann",
editor = "Federico, Marcello and
Waibel, Alex and
Costa-juss{\`a}, Marta R. and
Niehues, Jan and
St{\"u}ker, Sebastian and
Salesky, Elizabeth",
booktitle = "Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021)",
month = aug,
year = "2021",
address = "Bangkok, Thailand (online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.iwslt-1.32/",
doi = "10.18653/v1/2021.iwslt-1.32",
pages = "276--286",
abstract = "Complex natural language applications such as speech translation or pivot translation traditionally rely on cascaded models. However,cascaded models are known to be prone to error propagation and model discrepancy problems. Furthermore, there is no possibility of using end-to-end training data in conventional cascaded systems, meaning that the training data most suited for the task cannot be used. Previous studies suggested several approaches for integrated end-to-end training to overcome those problems, however they mostly rely on(synthetic or natural) three-way data. We propose a cascaded model based on the non-autoregressive Transformer that enables end-to-end training without the need for an explicit intermediate representation. This new architecture (i) avoids unnecessary early decisions that can cause errors which are then propagated throughout the cascaded models and (ii) utilizes the end-to-end training data directly. We conduct an evaluation on two pivot-based machine translation tasks, namely French{\textrightarrow}German and German{\textrightarrow}Czech. Our experimental results show that the proposed architecture yields an improvement of more than 2 BLEU for French{\textrightarrow}German over the cascaded baseline."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tokarchuk-etal-2021-integrated">
<titleInfo>
<title>Integrated Training for Sequence-to-Sequence Models Using Non-Autoregressive Transformer</title>
</titleInfo>
<name type="personal">
<namePart type="given">Evgeniia</namePart>
<namePart type="family">Tokarchuk</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jan</namePart>
<namePart type="family">Rosendahl</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Weiyue</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pavel</namePart>
<namePart type="family">Petrushkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tomer</namePart>
<namePart type="family">Lancewicki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shahram</namePart>
<namePart type="family">Khadivi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hermann</namePart>
<namePart type="family">Ney</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marcello</namePart>
<namePart type="family">Federico</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alex</namePart>
<namePart type="family">Waibel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marta</namePart>
<namePart type="given">R</namePart>
<namePart type="family">Costa-jussà</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jan</namePart>
<namePart type="family">Niehues</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sebastian</namePart>
<namePart type="family">Stuker</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elizabeth</namePart>
<namePart type="family">Salesky</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand (online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Complex natural language applications such as speech translation or pivot translation traditionally rely on cascaded models. However, cascaded models are known to be prone to error propagation and model discrepancy problems. Furthermore, there is no possibility of using end-to-end training data in conventional cascaded systems, meaning that the training data most suited for the task cannot be used. Previous studies suggested several approaches for integrated end-to-end training to overcome those problems; however, they mostly rely on (synthetic or natural) three-way data. We propose a cascaded model based on the non-autoregressive Transformer that enables end-to-end training without the need for an explicit intermediate representation. This new architecture (i) avoids unnecessary early decisions that can cause errors which are then propagated throughout the cascaded models and (ii) utilizes the end-to-end training data directly. We conduct an evaluation on two pivot-based machine translation tasks, namely French→German and German→Czech. Our experimental results show that the proposed architecture yields an improvement of more than 2 BLEU for French→German over the cascaded baseline.</abstract>
<identifier type="citekey">tokarchuk-etal-2021-integrated</identifier>
<identifier type="doi">10.18653/v1/2021.iwslt-1.32</identifier>
<location>
<url>https://aclanthology.org/2021.iwslt-1.32/</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>276</start>
<end>286</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Integrated Training for Sequence-to-Sequence Models Using Non-Autoregressive Transformer
%A Tokarchuk, Evgeniia
%A Rosendahl, Jan
%A Wang, Weiyue
%A Petrushkov, Pavel
%A Lancewicki, Tomer
%A Khadivi, Shahram
%A Ney, Hermann
%Y Federico, Marcello
%Y Waibel, Alex
%Y Costa-jussà, Marta R.
%Y Niehues, Jan
%Y Stüker, Sebastian
%Y Salesky, Elizabeth
%S Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021)
%D 2021
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand (online)
%F tokarchuk-etal-2021-integrated
%X Complex natural language applications such as speech translation or pivot translation traditionally rely on cascaded models. However, cascaded models are known to be prone to error propagation and model discrepancy problems. Furthermore, there is no possibility of using end-to-end training data in conventional cascaded systems, meaning that the training data most suited for the task cannot be used. Previous studies suggested several approaches for integrated end-to-end training to overcome those problems; however, they mostly rely on (synthetic or natural) three-way data. We propose a cascaded model based on the non-autoregressive Transformer that enables end-to-end training without the need for an explicit intermediate representation. This new architecture (i) avoids unnecessary early decisions that can cause errors which are then propagated throughout the cascaded models and (ii) utilizes the end-to-end training data directly. We conduct an evaluation on two pivot-based machine translation tasks, namely French→German and German→Czech. Our experimental results show that the proposed architecture yields an improvement of more than 2 BLEU for French→German over the cascaded baseline.
%R 10.18653/v1/2021.iwslt-1.32
%U https://aclanthology.org/2021.iwslt-1.32/
%U https://doi.org/10.18653/v1/2021.iwslt-1.32
%P 276-286
Markdown (Informal)
[Integrated Training for Sequence-to-Sequence Models Using Non-Autoregressive Transformer](https://aclanthology.org/2021.iwslt-1.32/) (Tokarchuk et al., IWSLT 2021)
ACL
Evgeniia Tokarchuk, Jan Rosendahl, Weiyue Wang, Pavel Petrushkov, Tomer Lancewicki, Shahram Khadivi, and Hermann Ney. 2021. Integrated Training for Sequence-to-Sequence Models Using Non-Autoregressive Transformer. In Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021), pages 276–286, Bangkok, Thailand (online). Association for Computational Linguistics.