@inproceedings{martinez-alonso-etal-2017-annotating,
title = "Annotating omission in statement pairs",
author = "Mart{\'\i}nez Alonso, H{\'e}ctor and
Delamaire, Amaury and
Sagot, Beno{\^\i}t",
editor = "Schneider, Nathan and
Xue, Nianwen",
booktitle = "Proceedings of the 11th Linguistic Annotation Workshop",
month = apr,
year = "2017",
address = "Valencia, Spain",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-0805",
doi = "10.18653/v1/W17-0805",
pages = "41--45",
abstract = "We focus on the identification of omission in statement pairs. We compare three annotation schemes, namely two different crowdsourcing schemes and manual expert annotation. We show that the simplest of the two crowdsourcing approaches yields a better annotation quality than the more complex one. We use a dedicated classifier to assess whether the annotators{'} behavior can be explained by straightforward linguistic features. The classifier benefits from a modeling that uses lexical information beyond length and overlap measures. However, for our task, we argue that expert and not crowdsourcing-based annotation is the best compromise between annotation cost and quality.",
}
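The BibTeX record above can be consumed programmatically. A minimal sketch, assuming Python with bibtexparser 1.x installed; the filename martinez-alonso-2017.bib is hypothetical and stands for the entry saved verbatim to disk:

# Minimal sketch: load the BibTeX record above with bibtexparser (v1.x API).
import bibtexparser
from bibtexparser.bparser import BibTexParser

with open("martinez-alonso-2017.bib") as f:
    # common_strings=True resolves the unquoted `apr` month macro used above
    db = bibtexparser.load(f, parser=BibTexParser(common_strings=True))

entry = db.entries[0]                 # v1.x entries are plain dicts
print(entry["ID"])                    # martinez-alonso-etal-2017-annotating
print(entry["title"])                 # Annotating omission in statement pairs
print(entry["doi"], entry["pages"])   # 10.18653/v1/W17-0805 41--45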
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="martinez-alonso-etal-2017-annotating">
<titleInfo>
<title>Annotating omission in statement pairs</title>
</titleInfo>
<name type="personal">
<namePart type="given">Héctor</namePart>
<namePart type="family">Martínez Alonso</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amaury</namePart>
<namePart type="family">Delamaire</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Benoît</namePart>
<namePart type="family">Sagot</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 11th Linguistic Annotation Workshop</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nathan</namePart>
<namePart type="family">Schneider</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nianwen</namePart>
<namePart type="family">Xue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Valencia, Spain</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We focus on the identification of omission in statement pairs. We compare three annotation schemes, namely two different crowdsourcing schemes and manual expert annotation. We show that the simplest of the two crowdsourcing approaches yields a better annotation quality than the more complex one. We use a dedicated classifier to assess whether the annotators’ behavior can be explained by straightforward linguistic features. The classifier benefits from a modeling that uses lexical information beyond length and overlap measures. However, for our task, we argue that expert and not crowdsourcing-based annotation is the best compromise between annotation cost and quality.</abstract>
<identifier type="citekey">martinez-alonso-etal-2017-annotating</identifier>
<identifier type="doi">10.18653/v1/W17-0805</identifier>
<location>
<url>https://aclanthology.org/W17-0805</url>
</location>
<part>
<date>2017-04</date>
<extent unit="page">
<start>41</start>
<end>45</end>
</extent>
</part>
</mods>
</modsCollection>
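The same fields can be pulled from the MODS record with the standard library alone. A hedged sketch: the element paths follow the XML above, and the filename martinez-alonso-2017.xml is hypothetical:

# Minimal sketch: extract title, authors, and DOI from the MODS record above.
import xml.etree.ElementTree as ET

NS = {"m": "http://www.loc.gov/mods/v3"}   # default namespace of the record
root = ET.parse("martinez-alonso-2017.xml").getroot()
mods = root.find("m:mods", NS)

title = mods.findtext("m:titleInfo/m:title", namespaces=NS)
doi = mods.findtext('m:identifier[@type="doi"]', namespaces=NS)
# Direct <name> children of <mods> are the authors; the role check is a
# defensive filter (workshop editors sit under <relatedItem> instead).
authors = [
    n.findtext('m:namePart[@type="given"]', namespaces=NS) + " "
    + n.findtext('m:namePart[@type="family"]', namespaces=NS)
    for n in mods.findall("m:name", NS)
    if n.findtext("m:role/m:roleTerm", namespaces=NS) == "author"
]
print(title, doi, authors)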
%0 Conference Proceedings
%T Annotating omission in statement pairs
%A Martínez Alonso, Héctor
%A Delamaire, Amaury
%A Sagot, Benoît
%Y Schneider, Nathan
%Y Xue, Nianwen
%S Proceedings of the 11th Linguistic Annotation Workshop
%D 2017
%8 April
%I Association for Computational Linguistics
%C Valencia, Spain
%F martinez-alonso-etal-2017-annotating
%X We focus on the identification of omission in statement pairs. We compare three annotation schemes, namely two different crowdsourcing schemes and manual expert annotation. We show that the simplest of the two crowdsourcing approaches yields a better annotation quality than the more complex one. We use a dedicated classifier to assess whether the annotators’ behavior can be explained by straightforward linguistic features. The classifier benefits from a modeling that uses lexical information beyond length and overlap measures. However, for our task, we argue that expert and not crowdsourcing-based annotation is the best compromise between annotation cost and quality.
%R 10.18653/v1/W17-0805
%U https://aclanthology.org/W17-0805
%U https://doi.org/10.18653/v1/W17-0805
%P 41-45
Markdown (Informal)
[Annotating omission in statement pairs](https://aclanthology.org/W17-0805) (Martínez Alonso et al., LAW 2017)
ACL
Héctor Martínez Alonso, Amaury Delamaire, and Benoît Sagot. 2017. Annotating omission in statement pairs. In Proceedings of the 11th Linguistic Annotation Workshop, pages 41–45, Valencia, Spain. Association for Computational Linguistics.