@inproceedings{takahashi-etal-2022-proqe,
title = "{P}ro{QE}: Proficiency-wise Quality Estimation dataset for Grammatical Error Correction",
author = "Takahashi, Yujin and
Kaneko, Masahiro and
Mita, Masato and
Komachi, Mamoru",
booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
month = jun,
year = "2022",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://aclanthology.org/2022.lrec-1.644",
pages = "5994--6000",
abstract = "This study investigates how supervised quality estimation (QE) models of grammatical error correction (GEC) are affected by the learners{'} proficiency with the data. QE models for GEC evaluations in prior work have obtained a high correlation with manual evaluations. However, when functioning in a real-world context, the data used for the reported results have limitations because prior works were biased toward data by learners with relatively high proficiency levels. To address this issue, we created a QE dataset that includes multiple proficiency levels and explored the necessity of performing proficiency-wise evaluation for QE of GEC. Our experiments demonstrated that differences in evaluation dataset proficiency affect the performance of QE models, and proficiency-wise evaluation helps create more robust models.",
}
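For reference, the BibTeX entry above can be loaded programmatically. The sketch below is illustrative only: it assumes the third-party bibtexparser package (v1 API) is installed and that the entry has been saved verbatim to a file named proqe.bib; the filename and variable names are assumptions, not part of the record.

# Minimal sketch: read the BibTeX entry above with bibtexparser (v1 API).
# Assumes the entry was saved verbatim to "proqe.bib" (illustrative filename).
import bibtexparser

with open("proqe.bib", encoding="utf-8") as handle:
    database = bibtexparser.load(handle)

entry = database.entries[0]
print(entry["ID"])     # citekey: takahashi-etal-2022-proqe
print(entry["title"])  # braces such as {P}ro{QE} are preserved verbatim
print(entry["pages"])  # 5994--6000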
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="takahashi-etal-2022-proqe">
<titleInfo>
<title>ProQE: Proficiency-wise Quality Estimation dataset for Grammatical Error Correction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yujin</namePart>
<namePart type="family">Takahashi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masahiro</namePart>
<namePart type="family">Kaneko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masato</namePart>
<namePart type="family">Mita</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mamoru</namePart>
<namePart type="family">Komachi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Thirteenth Language Resources and Evaluation Conference</title>
</titleInfo>
<originInfo>
<publisher>European Language Resources Association</publisher>
<place>
<placeTerm type="text">Marseille, France</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This study investigates how supervised quality estimation (QE) models of grammatical error correction (GEC) are affected by the learners’ proficiency with the data. QE models for GEC evaluations in prior work have obtained a high correlation with manual evaluations. However, when functioning in a real-world context, the data used for the reported results have limitations because prior works were biased toward data by learners with relatively high proficiency levels. To address this issue, we created a QE dataset that includes multiple proficiency levels and explored the necessity of performing proficiency-wise evaluation for QE of GEC. Our experiments demonstrated that differences in evaluation dataset proficiency affect the performance of QE models, and proficiency-wise evaluation helps create more robust models.</abstract>
<identifier type="citekey">takahashi-etal-2022-proqe</identifier>
<location>
<url>https://aclanthology.org/2022.lrec-1.644</url>
</location>
<part>
<date>2022-06</date>
<extent unit="page">
<start>5994</start>
<end>6000</end>
</extent>
</part>
</mods>
</modsCollection>
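Similarly, the MODS record above can be read with Python's standard library; note that every element lives in the MODS v3 namespace, so queries must be namespace-qualified. The filename proqe.xml and the variable names below are illustrative assumptions.

# Minimal sketch: extract a few fields from the MODS record above using
# only the standard library. Assumes the XML was saved verbatim to
# "proqe.xml" (illustrative filename).
import xml.etree.ElementTree as ET

NS = {"mods": "http://www.loc.gov/mods/v3"}

root = ET.parse("proqe.xml").getroot()   # <modsCollection>
record = root.find("mods:mods", NS)      # the single <mods> record

title = record.findtext("mods:titleInfo/mods:title", namespaces=NS)
authors = [
    " ".join(part.text for part in name.findall("mods:namePart", NS))
    for name in record.findall("mods:name[@type='personal']", NS)
]
url = record.findtext("mods:location/mods:url", namespaces=NS)

print(title)
print(authors)  # e.g. ['Yujin Takahashi', 'Masahiro Kaneko', ...]
print(url)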
%0 Conference Proceedings
%T ProQE: Proficiency-wise Quality Estimation dataset for Grammatical Error Correction
%A Takahashi, Yujin
%A Kaneko, Masahiro
%A Mita, Masato
%A Komachi, Mamoru
%S Proceedings of the Thirteenth Language Resources and Evaluation Conference
%D 2022
%8 June
%I European Language Resources Association
%C Marseille, France
%F takahashi-etal-2022-proqe
%X This study investigates how supervised quality estimation (QE) models of grammatical error correction (GEC) are affected by the learners’ proficiency with the data. QE models for GEC evaluations in prior work have obtained a high correlation with manual evaluations. However, when functioning in a real-world context, the data used for the reported results have limitations because prior works were biased toward data by learners with relatively high proficiency levels. To address this issue, we created a QE dataset that includes multiple proficiency levels and explored the necessity of performing proficiency-wise evaluation for QE of GEC. Our experiments demonstrated that differences in evaluation dataset proficiency affect the performance of QE models, and proficiency-wise evaluation helps create more robust models.
%U https://aclanthology.org/2022.lrec-1.644
%P 5994-6000
Markdown (Informal)
[ProQE: Proficiency-wise Quality Estimation dataset for Grammatical Error Correction](https://aclanthology.org/2022.lrec-1.644) (Takahashi et al., LREC 2022)
ACL
Yujin Takahashi, Masahiro Kaneko, Masato Mita, and Mamoru Komachi. 2022. ProQE: Proficiency-wise Quality Estimation dataset for Grammatical Error Correction. In Proceedings of the Thirteenth Language Resources and Evaluation Conference, pages 5994–6000, Marseille, France. European Language Resources Association.