@inproceedings{iglesias-etal-2018-accelerating,
title = "Accelerating {NMT} Batched Beam Decoding with {LMBR} Posteriors for Deployment",
author = "Iglesias, Gonzalo and
Tambellini, William and
De Gispert, Adri{\`a} and
Hasler, Eva and
Byrne, Bill",
editor = "Bangalore, Srinivas and
Chu-Carroll, Jennifer and
Li, Yunyao",
booktitle = "Proceedings of the 2018 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 3 (Industry Papers)",
month = jun,
year = "2018",
address = "New Orleans - Louisiana",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/N18-3013",
doi = "10.18653/v1/N18-3013",
pages = "106--113",
abstract = "We describe a batched beam decoding algorithm for NMT with LMBR n-gram posteriors, showing that LMBR techniques still yield gains on top of the best recently reported results with Transformers. We also discuss acceleration strategies for deployment, and the effect of the beam size and batching on memory and speed.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="iglesias-etal-2018-accelerating">
<titleInfo>
<title>Accelerating NMT Batched Beam Decoding with LMBR Posteriors for Deployment</title>
</titleInfo>
<name type="personal">
<namePart type="given">Gonzalo</namePart>
<namePart type="family">Iglesias</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">William</namePart>
<namePart type="family">Tambellini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Adrià</namePart>
<namePart type="family">De Gispert</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eva</namePart>
<namePart type="family">Hasler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bill</namePart>
<namePart type="family">Byrne</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 3 (Industry Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Srinivas</namePart>
<namePart type="family">Bangalore</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jennifer</namePart>
<namePart type="family">Chu-Carroll</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yunyao</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">New Orleans - Louisiana</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We describe a batched beam decoding algorithm for NMT with LMBR n-gram posteriors, showing that LMBR techniques still yield gains on top of the best recently reported results with Transformers. We also discuss acceleration strategies for deployment, and the effect of the beam size and batching on memory and speed.</abstract>
<identifier type="citekey">iglesias-etal-2018-accelerating</identifier>
<identifier type="doi">10.18653/v1/N18-3013</identifier>
<location>
<url>https://aclanthology.org/N18-3013</url>
</location>
<part>
<date>2018-06</date>
<extent unit="page">
<start>106</start>
<end>113</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Accelerating NMT Batched Beam Decoding with LMBR Posteriors for Deployment
%A Iglesias, Gonzalo
%A Tambellini, William
%A De Gispert, Adrià
%A Hasler, Eva
%A Byrne, Bill
%Y Bangalore, Srinivas
%Y Chu-Carroll, Jennifer
%Y Li, Yunyao
%S Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 3 (Industry Papers)
%D 2018
%8 June
%I Association for Computational Linguistics
%C New Orleans, Louisiana
%F iglesias-etal-2018-accelerating
%X We describe a batched beam decoding algorithm for NMT with LMBR n-gram posteriors, showing that LMBR techniques still yield gains on top of the best recently reported results with Transformers. We also discuss acceleration strategies for deployment, and the effect of the beam size and batching on memory and speed.
%R 10.18653/v1/N18-3013
%U https://aclanthology.org/N18-3013
%U https://doi.org/10.18653/v1/N18-3013
%P 106-113
Markdown (Informal)
[Accelerating NMT Batched Beam Decoding with LMBR Posteriors for Deployment](https://aclanthology.org/N18-3013) (Iglesias et al., NAACL 2018)
ACL
Gonzalo Iglesias, William Tambellini, Adrià De Gispert, Eva Hasler, and Bill Byrne. 2018. Accelerating NMT Batched Beam Decoding with LMBR Posteriors for Deployment. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 3 (Industry Papers), pages 106–113, New Orleans, Louisiana. Association for Computational Linguistics.
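
The abstract only names the technique. As a rough, hypothetical illustration of what combining NMT beam scores with LMBR n-gram posteriors can look like, here is a minimal Python sketch; it is not the paper's batched implementation, and the function names, the toy "NMT" distribution, the posterior table, and the per-order weights are all assumptions made for the example.

```python
# Minimal sketch (not the paper's algorithm): beam decoding where each hypothesis
# score combines the NMT log-probability with a bonus for n-grams found in a
# precomputed n-gram posterior table. Everything below is illustrative.
import math
from collections import defaultdict

def ngrams(tokens, max_order=4):
    """All n-grams of the token sequence up to max_order."""
    for n in range(1, max_order + 1):
        for i in range(len(tokens) - n + 1):
            yield tuple(tokens[i:i + n])

def lmbr_bonus(tokens, posteriors, weights):
    """Weighted sum of posterior scores over every n-gram in the hypothesis."""
    return sum(weights.get(len(g), 0.0) * posteriors.get(g, 0.0)
               for g in ngrams(tokens))

def beam_step(beam, next_token_logprobs, posteriors, weights, beam_size):
    """Expand each hypothesis with every candidate token, rescore with the
    n-gram posterior bonus, and keep the best `beam_size` hypotheses."""
    candidates = []
    for tokens, nmt_score in beam:
        for tok, logp in next_token_logprobs(tokens):
            new_tokens = tokens + [tok]
            new_nmt = nmt_score + logp
            total = new_nmt + lmbr_bonus(new_tokens, posteriors, weights)
            candidates.append((total, new_tokens, new_nmt))
    candidates.sort(key=lambda c: c[0], reverse=True)
    return [(toks, nmt) for _, toks, nmt in candidates[:beam_size]]

# Toy usage: a fake "NMT" next-token distribution and a tiny posterior table.
def toy_logprobs(tokens):
    vocab = {"the": 0.5, "cat": 0.3, "sat": 0.2}
    return [(w, math.log(p)) for w, p in vocab.items()]

posteriors = defaultdict(float, {("the", "cat"): 0.9, ("cat",): 0.8})
weights = {1: 0.1, 2: 0.2}        # per-order scaling factors (assumed values)
beam = [(["<s>"], 0.0)]
for _ in range(3):
    beam = beam_step(beam, toy_logprobs, posteriors, weights, beam_size=2)
print(beam[0][0])
```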