@inproceedings{mathur-etal-2018-towards,
  title     = {Towards Efficient Machine Translation Evaluation by Modelling Annotators},
  author    = {Mathur, Nitika and
               Baldwin, Timothy and
               Cohn, Trevor},
  editor    = {Kim, Sunghwan Mac and
               Zhang, Xiuzhen (Jenny)},
  booktitle = {Proceedings of the Australasian Language Technology Association Workshop 2018},
  month     = dec,
  year      = {2018},
  address   = {Dunedin, New Zealand},
  url       = {https://aclanthology.org/U18-1010/},
  pages     = {77--82},
  abstract  = {Accurate evaluation of translation has long been a difficult, yet important problem. Current evaluations use direct assessment (DA), based on crowd sourcing judgements from a large pool of workers, along with quality control checks, and a robust method for combining redundant judgements. In this paper we show that the quality control mechanism is overly conservative, which increases the time and expense of the evaluation. We propose a model that does not rely on a pre-processing step to filter workers and takes into account varying annotator reliabilities. Our model effectively weights each worker's scores based on the inferred precision of the worker, and is much more reliable than the mean of either the raw scores or the standardised scores. We also show that DA does not deliver on the promise of longitudinal evaluation, and propose redesigning the structure of the annotation tasks that can solve this problem.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mathur-etal-2018-towards">
<titleInfo>
<title>Towards Efficient Machine Translation Evaluation by Modelling Annotators</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nitika</namePart>
<namePart type="family">Mathur</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Timothy</namePart>
<namePart type="family">Baldwin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Trevor</namePart>
<namePart type="family">Cohn</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Australasian Language Technology Association Workshop 2018</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sunghwan</namePart>
<namePart type="given">Mac</namePart>
<namePart type="family">Kim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiuzhen</namePart>
<namePart type="given">(Jenny)</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<place>
<placeTerm type="text">Dunedin, New Zealand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Accurate evaluation of translation has long been a difficult, yet important problem. Current evaluations use direct assessment (DA), based on crowd sourcing judgements from a large pool of workers, along with quality control checks, and a robust method for combining redundant judgements. In this paper we show that the quality control mechanism is overly conservative, which increases the time and expense of the evaluation. We propose a model that does not rely on a pre-processing step to filter workers and takes into account varying annotator reliabilities. Our model effectively weights each worker’s scores based on the inferred precision of the worker, and is much more reliable than the mean of either the raw scores or the standardised scores. We also show that DA does not deliver on the promise of longitudinal evaluation, and propose redesigning the structure of the annotation tasks that can solve this problem.</abstract>
<identifier type="citekey">mathur-etal-2018-towards</identifier>
<location>
<url>https://aclanthology.org/U18-1010/</url>
</location>
<part>
<date>2018-12</date>
<extent unit="page">
<start>77</start>
<end>82</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Towards Efficient Machine Translation Evaluation by Modelling Annotators
%A Mathur, Nitika
%A Baldwin, Timothy
%A Cohn, Trevor
%Y Kim, Sunghwan Mac
%Y Zhang, Xiuzhen (Jenny)
%S Proceedings of the Australasian Language Technology Association Workshop 2018
%D 2018
%8 December
%C Dunedin, New Zealand
%F mathur-etal-2018-towards
%X Accurate evaluation of translation has long been a difficult, yet important problem. Current evaluations use direct assessment (DA), based on crowd sourcing judgements from a large pool of workers, along with quality control checks, and a robust method for combining redundant judgements. In this paper we show that the quality control mechanism is overly conservative, which increases the time and expense of the evaluation. We propose a model that does not rely on a pre-processing step to filter workers and takes into account varying annotator reliabilities. Our model effectively weights each worker’s scores based on the inferred precision of the worker, and is much more reliable than the mean of either the raw scores or the standardised scores. We also show that DA does not deliver on the promise of longitudinal evaluation, and propose redesigning the structure of the annotation tasks that can solve this problem.
%U https://aclanthology.org/U18-1010/
%P 77-82
Markdown (Informal)
[Towards Efficient Machine Translation Evaluation by Modelling Annotators](https://aclanthology.org/U18-1010/) (Mathur et al., ALTA 2018)
ACL