BibTeX
@inproceedings{sutawika-cruz-2021-data,
title = "Data Processing Matters: {SRPH}-Konvergen {AI}{'}s Machine Translation System for {WMT}{'}21",
author = "Sutawika, Lintang and
Cruz, Jan Christian Blaise",
booktitle = "Proceedings of the Sixth Conference on Machine Translation",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.wmt-1.52",
pages = "431--438",
abstract = "In this paper, we describe the submission of the joint Samsung Research Philippines-Konvergen AI team for the WMT{'}21 Large Scale Multilingual Translation Task - Small Track 2. We submit a standard Seq2Seq Transformer model to the shared task without any training or architecture tricks, relying mainly on the strength of our data preprocessing techniques to boost performance. Our final submission model scored 22.92 average BLEU on the FLORES-101 devtest set, and scored 22.97 average BLEU on the contest{'}s hidden test set, ranking us sixth overall. Despite using only a standard Transformer, our model ranked first in Indonesian to Javanese, showing that data preprocessing matters equally, if not more, than cutting edge model architectures and training techniques.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sutawika-cruz-2021-data">
<titleInfo>
<title>Data Processing Matters: SRPH-Konvergen AI’s Machine Translation System for WMT’21</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lintang</namePart>
<namePart type="family">Sutawika</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jan</namePart>
<namePart type="given">Christian</namePart>
<namePart type="given">Blaise</namePart>
<namePart type="family">Cruz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Sixth Conference on Machine Translation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In this paper, we describe the submission of the joint Samsung Research Philippines-Konvergen AI team for the WMT’21 Large Scale Multilingual Translation Task - Small Track 2. We submit a standard Seq2Seq Transformer model to the shared task without any training or architecture tricks, relying mainly on the strength of our data preprocessing techniques to boost performance. Our final submission model scored 22.92 average BLEU on the FLORES-101 devtest set, and scored 22.97 average BLEU on the contest’s hidden test set, ranking us sixth overall. Despite using only a standard Transformer, our model ranked first in Indonesian to Javanese, showing that data preprocessing matters as much as, if not more than, cutting-edge model architectures and training techniques.</abstract>
<identifier type="citekey">sutawika-cruz-2021-data</identifier>
<location>
<url>https://aclanthology.org/2021.wmt-1.52</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>431</start>
<end>438</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Data Processing Matters: SRPH-Konvergen AI’s Machine Translation System for WMT’21
%A Sutawika, Lintang
%A Cruz, Jan Christian Blaise
%S Proceedings of the Sixth Conference on Machine Translation
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online
%F sutawika-cruz-2021-data
%X In this paper, we describe the submission of the joint Samsung Research Philippines-Konvergen AI team for the WMT’21 Large Scale Multilingual Translation Task - Small Track 2. We submit a standard Seq2Seq Transformer model to the shared task without any training or architecture tricks, relying mainly on the strength of our data preprocessing techniques to boost performance. Our final submission model scored 22.92 average BLEU on the FLORES-101 devtest set, and scored 22.97 average BLEU on the contest’s hidden test set, ranking us sixth overall. Despite using only a standard Transformer, our model ranked first in Indonesian to Javanese, showing that data preprocessing matters as much as, if not more than, cutting-edge model architectures and training techniques.
%U https://aclanthology.org/2021.wmt-1.52
%P 431-438
Markdown (Informal)
[Data Processing Matters: SRPH-Konvergen AI’s Machine Translation System for WMT’21](https://aclanthology.org/2021.wmt-1.52) (Sutawika & Cruz, WMT 2021)
ACL
Lintang Sutawika and Jan Christian Blaise Cruz. 2021. Data Processing Matters: SRPH-Konvergen AI’s Machine Translation System for WMT’21. In Proceedings of the Sixth Conference on Machine Translation, pages 431–438, Online. Association for Computational Linguistics.