@inproceedings{kementchedjhieva-sogaard-2023-grammatical,
title = "Grammatical Error Correction through Round-Trip Machine Translation",
author = "Kementchedjhieva, Yova and
S{\o}gaard, Anders",
booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-eacl.165",
doi = "10.18653/v1/2023.findings-eacl.165",
pages = "2208--2215",
abstract = "Machine translation (MT) operates on the premise of an interlingua which abstracts away from surface form while preserving meaning. A decade ago the idea of using round-trip MT to guide grammatical error correction was proposed as a way to abstract away from potential errors in surface forms (Madnani et al., 2012). At the time, it did not pan out due to the low quality of MT systems of the day. Today much stronger MT systems are available so we re-evaluate this idea across five languages and models of various sizes. We find that for extra large models input augmentation through round-trip MT has little to no effect. For more {`}workable{'} model sizes, however, it yields consistent improvements, sometimes bringing the performance of a \textit{base} or \textit{large} model up to that of a \textit{large} or \textit{xl} model, respectively. The round-trip translation comes at a computational cost though, so one would have to determine whether to opt for a larger model or for input augmentation on a case-by-case basis.",
}