@inproceedings{zhang-sennrich-2021-edinburghs,
  title     = {{E}dinburgh{'}s End-to-End Multilingual Speech Translation System for {IWSLT} 2021},
  author    = {Zhang, Biao and Sennrich, Rico},
  booktitle = {Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021)},
  month     = aug,
  year      = {2021},
  address   = {Bangkok, Thailand (online)},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.iwslt-1.19},
  doi       = {10.18653/v1/2021.iwslt-1.19},
  pages     = {160--168},
  abstract  = {This paper describes Edinburgh{'}s submissions to the IWSLT2021 multilingual speech translation (ST) task. We aim at improving multilingual translation and zero-shot performance in the constrained setting (without using any extra training data) through methods that encourage transfer learning and larger capacity modeling with advanced neural components. We build our end-to-end multilingual ST model based on Transformer, integrating techniques including adaptive speech feature selection, language-specific modeling, multi-task learning, deep and big Transformer, sparsified linear attention and root mean square layer normalization. We adopt data augmentation using machine translation models for ST which converts the zero-shot problem into a zero-resource one. Experimental results show that these methods deliver substantial improvements, surpassing the official baseline by {\textgreater} 15 average BLEU and outperforming our cascading system by {\textgreater} 2 average BLEU. Our final submission achieves competitive performance (runner up).},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhang-sennrich-2021-edinburghs">
<titleInfo>
<title>Edinburgh’s End-to-End Multilingual Speech Translation System for IWSLT 2021</title>
</titleInfo>
<name type="personal">
<namePart type="given">Biao</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Rico</namePart>
<namePart type="family">Sennrich</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand (online)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes Edinburgh’s submissions to the IWSLT2021 multilingual speech translation (ST) task. We aim at improving multilingual translation and zero-shot performance in the constrained setting (without using any extra training data) through methods that encourage transfer learning and larger capacity modeling with advanced neural components. We build our end-to-end multilingual ST model based on Transformer, integrating techniques including adaptive speech feature selection, language-specific modeling, multi-task learning, deep and big Transformer, sparsified linear attention and root mean square layer normalization. We adopt data augmentation using machine translation models for ST which converts the zero-shot problem into a zero-resource one. Experimental results show that these methods deliver substantial improvements, surpassing the official baseline by >15 average BLEU and outperforming our cascading system by >2 average BLEU. Our final submission achieves competitive performance (runner up).</abstract>
<identifier type="citekey">zhang-sennrich-2021-edinburghs</identifier>
<identifier type="doi">10.18653/v1/2021.iwslt-1.19</identifier>
<location>
<url>https://aclanthology.org/2021.iwslt-1.19</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>160</start>
<end>168</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Edinburgh’s End-to-End Multilingual Speech Translation System for IWSLT 2021
%A Zhang, Biao
%A Sennrich, Rico
%S Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021)
%D 2021
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand (online)
%F zhang-sennrich-2021-edinburghs
%X This paper describes Edinburgh’s submissions to the IWSLT2021 multilingual speech translation (ST) task. We aim at improving multilingual translation and zero-shot performance in the constrained setting (without using any extra training data) through methods that encourage transfer learning and larger capacity modeling with advanced neural components. We build our end-to-end multilingual ST model based on Transformer, integrating techniques including adaptive speech feature selection, language-specific modeling, multi-task learning, deep and big Transformer, sparsified linear attention and root mean square layer normalization. We adopt data augmentation using machine translation models for ST which converts the zero-shot problem into a zero-resource one. Experimental results show that these methods deliver substantial improvements, surpassing the official baseline by >15 average BLEU and outperforming our cascading system by >2 average BLEU. Our final submission achieves competitive performance (runner up).
%R 10.18653/v1/2021.iwslt-1.19
%U https://aclanthology.org/2021.iwslt-1.19
%U https://doi.org/10.18653/v1/2021.iwslt-1.19
%P 160-168
Markdown (Informal)
[Edinburgh’s End-to-End Multilingual Speech Translation System for IWSLT 2021](https://aclanthology.org/2021.iwslt-1.19) (Zhang & Sennrich, IWSLT 2021)
ACL