@inproceedings{junczys-dowmunt-etal-2018-marian-cost,
  % Whole-word brace protection ({Marian}, {C++}) instead of the exported
  % single-letter form ({M}arian, {C}++): mid-word braces can break kerning
  % and hyphenation under some styles; the protected output is identical.
  title     = {{Marian}: Cost-effective High-Quality Neural Machine Translation in {C++}},
  author    = {Junczys-Dowmunt, Marcin and
               Heafield, Kenneth and
               Hoang, Hieu and
               Grundkiewicz, Roman and
               Aue, Anthony},
  editor    = {Birch, Alexandra and
               Finch, Andrew and
               Luong, Thang and
               Neubig, Graham and
               Oda, Yusuke},
  booktitle = {Proceedings of the 2nd Workshop on Neural Machine Translation and Generation},
  % month uses the predefined unquoted macro so styles can localise/abbreviate.
  month     = jul,
  year      = {2018},
  % NOTE(review): ACL Anthology convention puts the conference venue in
  % `address`; kept as exported for consistency with the Anthology record.
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-2716},
  % Bare DOI (no resolver prefix); styles add the https://doi.org/ link.
  doi       = {10.18653/v1/W18-2716},
  pages     = {129--135},
  abstract  = {This paper describes the submissions of the {``}Marian{''} team to the WNMT 2018 shared task. We investigate combinations of teacher-student training, low-precision matrix products, auto-tuning and other methods to optimize the Transformer model on GPU and CPU. By further integrating these methods with the new averaging attention networks, a recently introduced faster Transformer variant, we create a number of high-quality, high-performance models on the GPU and CPU, dominating the Pareto frontier for this shared task.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="junczys-dowmunt-etal-2018-marian-cost">
<titleInfo>
<title>Marian: Cost-effective High-Quality Neural Machine Translation in C++</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marcin</namePart>
<namePart type="family">Junczys-Dowmunt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kenneth</namePart>
<namePart type="family">Heafield</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hieu</namePart>
<namePart type="family">Hoang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roman</namePart>
<namePart type="family">Grundkiewicz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anthony</namePart>
<namePart type="family">Aue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Neural Machine Translation and Generation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexandra</namePart>
<namePart type="family">Birch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andrew</namePart>
<namePart type="family">Finch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Thang</namePart>
<namePart type="family">Luong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Graham</namePart>
<namePart type="family">Neubig</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yusuke</namePart>
<namePart type="family">Oda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes the submissions of the “Marian” team to the WNMT 2018 shared task. We investigate combinations of teacher-student training, low-precision matrix products, auto-tuning and other methods to optimize the Transformer model on GPU and CPU. By further integrating these methods with the new averaging attention networks, a recently introduced faster Transformer variant, we create a number of high-quality, high-performance models on the GPU and CPU, dominating the Pareto frontier for this shared task.</abstract>
<identifier type="citekey">junczys-dowmunt-etal-2018-marian-cost</identifier>
<identifier type="doi">10.18653/v1/W18-2716</identifier>
<location>
<url>https://aclanthology.org/W18-2716</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>129</start>
<end>135</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Marian: Cost-effective High-Quality Neural Machine Translation in C++
%A Junczys-Dowmunt, Marcin
%A Heafield, Kenneth
%A Hoang, Hieu
%A Grundkiewicz, Roman
%A Aue, Anthony
%Y Birch, Alexandra
%Y Finch, Andrew
%Y Luong, Thang
%Y Neubig, Graham
%Y Oda, Yusuke
%S Proceedings of the 2nd Workshop on Neural Machine Translation and Generation
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F junczys-dowmunt-etal-2018-marian-cost
%X This paper describes the submissions of the “Marian” team to the WNMT 2018 shared task. We investigate combinations of teacher-student training, low-precision matrix products, auto-tuning and other methods to optimize the Transformer model on GPU and CPU. By further integrating these methods with the new averaging attention networks, a recently introduced faster Transformer variant, we create a number of high-quality, high-performance models on the GPU and CPU, dominating the Pareto frontier for this shared task.
%R 10.18653/v1/W18-2716
%U https://aclanthology.org/W18-2716
%U https://doi.org/10.18653/v1/W18-2716
%P 129-135
Markdown (Informal)
[Marian: Cost-effective High-Quality Neural Machine Translation in C++](https://aclanthology.org/W18-2716) (Junczys-Dowmunt et al., NGT 2018)
ACL