@inproceedings{bogoychev-etal-2022-edinburghs,
    title = "{E}dinburgh{'}s Submission to the {WMT} 2022 Efficiency Task",
    author = "Bogoychev, Nikolay and
        Behnke, Maximiliana and
        Van Der Linde, Jelmer and
        Nail, Graeme and
        Heafield, Kenneth and
        Zhang, Biao and
        Kashyap, Sidharth",
    booktitle = "Proceedings of the Seventh Conference on Machine Translation (WMT)",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates (Hybrid)",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.wmt-1.63",
    pages = "661--667",
    abstract = "We participated in all tracks of the WMT 2022 efficient machine translation task: single-core CPU, multi-core CPU, and GPU hardware under throughput and latency conditions. Our submissions explore a number of efficiency strategies: knowledge distillation, a simpler simple recurrent unit (SSRU) decoder with one or two layers, shortlisting, a deep encoder with a shallow decoder, pruning, and a bidirectional decoder. For the CPU tracks, we used quantized 8-bit models. For the GPU track, we used FP16 quantisation. We explored various pruning strategies and combinations of one or more of the above methods.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="bogoychev-etal-2022-edinburghs">
    <titleInfo>
      <title>Edinburgh’s Submission to the WMT 2022 Efficiency Task</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Nikolay</namePart>
      <namePart type="family">Bogoychev</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Maximiliana</namePart>
      <namePart type="family">Behnke</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jelmer</namePart>
      <namePart type="family">Van Der Linde</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Graeme</namePart>
      <namePart type="family">Nail</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kenneth</namePart>
      <namePart type="family">Heafield</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Biao</namePart>
      <namePart type="family">Zhang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sidharth</namePart>
      <namePart type="family">Kashyap</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Seventh Conference on Machine Translation (WMT)</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We participated in all tracks of the WMT 2022 efficient machine translation task: single-core CPU, multi-core CPU, and GPU hardware under throughput and latency conditions. Our submissions explore a number of efficiency strategies: knowledge distillation, a simpler simple recurrent unit (SSRU) decoder with one or two layers, shortlisting, a deep encoder with a shallow decoder, pruning, and a bidirectional decoder. For the CPU tracks, we used quantized 8-bit models. For the GPU track, we used FP16 quantisation. We explored various pruning strategies and combinations of one or more of the above methods.</abstract>
    <identifier type="citekey">bogoychev-etal-2022-edinburghs</identifier>
    <location>
      <url>https://aclanthology.org/2022.wmt-1.63</url>
    </location>
    <part>
      <date>2022-12</date>
      <extent unit="page">
        <start>661</start>
        <end>667</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Edinburgh’s Submission to the WMT 2022 Efficiency Task
%A Bogoychev, Nikolay
%A Behnke, Maximiliana
%A Van Der Linde, Jelmer
%A Nail, Graeme
%A Heafield, Kenneth
%A Zhang, Biao
%A Kashyap, Sidharth
%S Proceedings of the Seventh Conference on Machine Translation (WMT)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F bogoychev-etal-2022-edinburghs
%X We participated in all tracks of the WMT 2022 efficient machine translation task: single-core CPU, multi-core CPU, and GPU hardware under throughput and latency conditions. Our submissions explore a number of efficiency strategies: knowledge distillation, a simpler simple recurrent unit (SSRU) decoder with one or two layers, shortlisting, a deep encoder with a shallow decoder, pruning, and a bidirectional decoder. For the CPU tracks, we used quantized 8-bit models. For the GPU track, we used FP16 quantisation. We explored various pruning strategies and combinations of one or more of the above methods.
%U https://aclanthology.org/2022.wmt-1.63
%P 661-667
Markdown (Informal)
[Edinburgh’s Submission to the WMT 2022 Efficiency Task](https://aclanthology.org/2022.wmt-1.63) (Bogoychev et al., WMT 2022)
ACL
Nikolay Bogoychev, Maximiliana Behnke, Jelmer Van Der Linde, Graeme Nail, Kenneth Heafield, Biao Zhang, and Sidharth Kashyap. 2022. Edinburgh’s Submission to the WMT 2022 Efficiency Task. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 661–667, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.