@inproceedings{haralabopoulos-etal-2020-objective,
title = "Objective Assessment of Subjective Tasks in Crowdsourcing Applications",
author = "Haralabopoulos, Giannis and
Tsikandilakis, Myron and
Torres Torres, Mercedes and
McAuley, Derek",
editor = "Fiumara, James and
Cieri, Christopher and
Liberman, Mark and
Callison-Burch, Chris",
booktitle = "Proceedings of the LREC 2020 Workshop on ``Citizen Linguistics in Language Resource Development''",
month = may,
year = "2020",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://aclanthology.org/2020.cllrd-1.3",
pages = "15--25",
abstract = "Labelling, or annotation, is the process by which we assign labels to an item with regards to a task. In some Artificial Intelligence problems, such as Computer Vision tasks, the goal is to obtain objective labels. However, in problems such as text and sentiment analysis, subjective labelling is often required. More so when the sentiment analysis deals with actual emotions instead of polarity (positive/negative) . Scientists employ human experts to create these labels, but it is costly and time consuming. Crowdsourcing enables researchers to utilise non-expert knowledge for scientific tasks. From image analysis to semantic annotation, interested researchers can gather a large sample of answers via crowdsourcing platforms in a timely manner. However, non-expert contributions often need to be thoroughly assessed, particularly so when a task is subjective. Researchers have traditionally used {`}Gold Standard{'}, {`}Thresholding{'} and {`}Majority Voting{'} as methods to filter non-expert contributions. We argue that these methods are unsuitable for subjective tasks, such as lexicon acquisition and sentiment analysis. We discuss subjectivity in human centered tasks and present a filtering method that defines quality contributors, based on a set of objectively infused terms in a lexicon acquisition task. We evaluate our method against an established lexicon, the diversity of emotions - i.e. subjectivity- and the exclusion of contributions. Our proposed objective evaluation method can be used to assess contributors in subjective tasks that will provide domain agnostic, quality results, with at least 7{\%} improvement over traditional methods.",
language = "English",
ISBN = "979-10-95546-59-7",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="haralabopoulos-etal-2020-objective">
<titleInfo>
<title>Objective Assessment of Subjective Tasks in Crowdsourcing Applications</title>
</titleInfo>
<name type="personal">
<namePart type="given">Giannis</namePart>
<namePart type="family">Haralabopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Myron</namePart>
<namePart type="family">Tsikandilakis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mercedes</namePart>
<namePart type="family">Torres Torres</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Derek</namePart>
<namePart type="family">McAuley</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<language>
<languageTerm type="text">English</languageTerm>
<languageTerm type="code" authority="iso639-2b">eng</languageTerm>
</language>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the LREC 2020 Workshop on “Citizen Linguistics in Language Resource Development”</title>
</titleInfo>
<name type="personal">
<namePart type="given">James</namePart>
<namePart type="family">Fiumara</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christopher</namePart>
<namePart type="family">Cieri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mark</namePart>
<namePart type="family">Liberman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chris</namePart>
<namePart type="family">Callison-Burch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>European Language Resources Association</publisher>
<place>
<placeTerm type="text">Marseille, France</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-10-95546-59-7</identifier>
</relatedItem>
<abstract>Labelling, or annotation, is the process by which we assign labels to an item with regard to a task. In some Artificial Intelligence problems, such as Computer Vision tasks, the goal is to obtain objective labels. However, in problems such as text and sentiment analysis, subjective labelling is often required, more so when the sentiment analysis deals with actual emotions instead of polarity (positive/negative). Scientists employ human experts to create these labels, but this is costly and time-consuming. Crowdsourcing enables researchers to utilise non-expert knowledge for scientific tasks. From image analysis to semantic annotation, interested researchers can gather a large sample of answers via crowdsourcing platforms in a timely manner. However, non-expert contributions often need to be thoroughly assessed, particularly so when a task is subjective. Researchers have traditionally used ‘Gold Standard’, ‘Thresholding’ and ‘Majority Voting’ as methods to filter non-expert contributions. We argue that these methods are unsuitable for subjective tasks, such as lexicon acquisition and sentiment analysis. We discuss subjectivity in human-centered tasks and present a filtering method that defines quality contributors based on a set of objectively infused terms in a lexicon acquisition task. We evaluate our method against an established lexicon, the diversity of emotions (i.e. subjectivity) and the exclusion of contributions. Our proposed objective evaluation method can be used to assess contributors in subjective tasks and provides domain-agnostic, quality results, with at least a 7% improvement over traditional methods.</abstract>
<identifier type="citekey">haralabopoulos-etal-2020-objective</identifier>
<location>
<url>https://aclanthology.org/2020.cllrd-1.3</url>
</location>
<part>
<date>2020-05</date>
<extent unit="page">
<start>15</start>
<end>25</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Objective Assessment of Subjective Tasks in Crowdsourcing Applications
%A Haralabopoulos, Giannis
%A Tsikandilakis, Myron
%A Torres Torres, Mercedes
%A McAuley, Derek
%Y Fiumara, James
%Y Cieri, Christopher
%Y Liberman, Mark
%Y Callison-Burch, Chris
%S Proceedings of the LREC 2020 Workshop on “Citizen Linguistics in Language Resource Development”
%D 2020
%8 May
%I European Language Resources Association
%C Marseille, France
%@ 979-10-95546-59-7
%G English
%F haralabopoulos-etal-2020-objective
%X Labelling, or annotation, is the process by which we assign labels to an item with regard to a task. In some Artificial Intelligence problems, such as Computer Vision tasks, the goal is to obtain objective labels. However, in problems such as text and sentiment analysis, subjective labelling is often required, more so when the sentiment analysis deals with actual emotions instead of polarity (positive/negative). Scientists employ human experts to create these labels, but this is costly and time-consuming. Crowdsourcing enables researchers to utilise non-expert knowledge for scientific tasks. From image analysis to semantic annotation, interested researchers can gather a large sample of answers via crowdsourcing platforms in a timely manner. However, non-expert contributions often need to be thoroughly assessed, particularly so when a task is subjective. Researchers have traditionally used ‘Gold Standard’, ‘Thresholding’ and ‘Majority Voting’ as methods to filter non-expert contributions. We argue that these methods are unsuitable for subjective tasks, such as lexicon acquisition and sentiment analysis. We discuss subjectivity in human-centered tasks and present a filtering method that defines quality contributors based on a set of objectively infused terms in a lexicon acquisition task. We evaluate our method against an established lexicon, the diversity of emotions (i.e. subjectivity) and the exclusion of contributions. Our proposed objective evaluation method can be used to assess contributors in subjective tasks and provides domain-agnostic, quality results, with at least a 7% improvement over traditional methods.
%U https://aclanthology.org/2020.cllrd-1.3
%P 15-25
Markdown (Informal)
[Objective Assessment of Subjective Tasks in Crowdsourcing Applications](https://aclanthology.org/2020.cllrd-1.3) (Haralabopoulos et al., CLLRD 2020)