BibTeX
@inproceedings{scholman-demberg-2017-crowdsourcing,
title = "Crowdsourcing discourse interpretations: On the influence of context and the reliability of a connective insertion task",
author = "Scholman, Merel and
Demberg, Vera",
editor = "Schneider, Nathan and
Xue, Nianwen",
booktitle = "Proceedings of the 11th Linguistic Annotation Workshop",
month = apr,
year = "2017",
address = "Valencia, Spain",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-0803",
doi = "10.18653/v1/W17-0803",
pages = "24--33",
abstract = "Traditional discourse annotation tasks are considered costly and time-consuming, and the reliability and validity of these tasks is in question. In this paper, we investigate whether crowdsourcing can be used to obtain reliable discourse relation annotations. We also examine the influence of context on the reliability of the data. The results of a crowdsourced connective insertion task showed that the method can be used to obtain reliable annotations: The majority of the inserted connectives converged with the original label. Further, the method is sensitive to the fact that multiple senses can often be inferred for a single relation. Regarding the presence of context, the results show no significant difference in distributions of insertions between conditions overall. However, a by-item comparison revealed several characteristics of segments that determine whether the presence of context makes a difference in annotations. The findings discussed in this paper can be taken as evidence that crowdsourcing can be used as a valuable method to obtain insights into the sense(s) of relations.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="scholman-demberg-2017-crowdsourcing">
    <titleInfo>
      <title>Crowdsourcing discourse interpretations: On the influence of context and the reliability of a connective insertion task</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Merel</namePart>
      <namePart type="family">Scholman</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Vera</namePart>
      <namePart type="family">Demberg</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2017-04</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 11th Linguistic Annotation Workshop</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Nathan</namePart>
        <namePart type="family">Schneider</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nianwen</namePart>
        <namePart type="family">Xue</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Valencia, Spain</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Traditional discourse annotation tasks are considered costly and time-consuming, and the reliability and validity of these tasks is in question. In this paper, we investigate whether crowdsourcing can be used to obtain reliable discourse relation annotations. We also examine the influence of context on the reliability of the data. The results of a crowdsourced connective insertion task showed that the method can be used to obtain reliable annotations: The majority of the inserted connectives converged with the original label. Further, the method is sensitive to the fact that multiple senses can often be inferred for a single relation. Regarding the presence of context, the results show no significant difference in distributions of insertions between conditions overall. However, a by-item comparison revealed several characteristics of segments that determine whether the presence of context makes a difference in annotations. The findings discussed in this paper can be taken as evidence that crowdsourcing can be used as a valuable method to obtain insights into the sense(s) of relations.</abstract>
    <identifier type="citekey">scholman-demberg-2017-crowdsourcing</identifier>
    <identifier type="doi">10.18653/v1/W17-0803</identifier>
    <location>
      <url>https://aclanthology.org/W17-0803</url>
    </location>
    <part>
      <date>2017-04</date>
      <extent unit="page">
        <start>24</start>
        <end>33</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Crowdsourcing discourse interpretations: On the influence of context and the reliability of a connective insertion task
%A Scholman, Merel
%A Demberg, Vera
%Y Schneider, Nathan
%Y Xue, Nianwen
%S Proceedings of the 11th Linguistic Annotation Workshop
%D 2017
%8 April
%I Association for Computational Linguistics
%C Valencia, Spain
%F scholman-demberg-2017-crowdsourcing
%X Traditional discourse annotation tasks are considered costly and time-consuming, and the reliability and validity of these tasks is in question. In this paper, we investigate whether crowdsourcing can be used to obtain reliable discourse relation annotations. We also examine the influence of context on the reliability of the data. The results of a crowdsourced connective insertion task showed that the method can be used to obtain reliable annotations: The majority of the inserted connectives converged with the original label. Further, the method is sensitive to the fact that multiple senses can often be inferred for a single relation. Regarding the presence of context, the results show no significant difference in distributions of insertions between conditions overall. However, a by-item comparison revealed several characteristics of segments that determine whether the presence of context makes a difference in annotations. The findings discussed in this paper can be taken as evidence that crowdsourcing can be used as a valuable method to obtain insights into the sense(s) of relations.
%R 10.18653/v1/W17-0803
%U https://aclanthology.org/W17-0803
%U https://doi.org/10.18653/v1/W17-0803
%P 24-33
Markdown (Informal)
[Crowdsourcing discourse interpretations: On the influence of context and the reliability of a connective insertion task](https://aclanthology.org/W17-0803) (Scholman & Demberg, LAW 2017)
ACL
Merel Scholman and Vera Demberg. 2017. Crowdsourcing discourse interpretations: On the influence of context and the reliability of a connective insertion task. In Proceedings of the 11th Linguistic Annotation Workshop, pages 24–33, Valencia, Spain. Association for Computational Linguistics.