BibTeX
@inproceedings{abercrombie-etal-2023-temporal,
    title = "Temporal and Second Language Influence on Intra-Annotator Agreement and Stability in Hate Speech Labelling",
    author = "Abercrombie, Gavin and
      Hovy, Dirk and
      Prabhakaran, Vinodkumar",
    editor = "Prange, Jakob and
      Friedrich, Annemarie",
    booktitle = "Proceedings of the 17th Linguistic Annotation Workshop (LAW-XVII)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.law-1.10",
    doi = "10.18653/v1/2023.law-1.10",
    pages = "96--103",
abstract = "Much work in natural language processing (NLP) relies on human annotation. The majority of this implicitly assumes that annotator{'}s labels are temporally stable, although the reality is that human judgements are rarely consistent over time. As a subjective annotation task, hate speech labels depend on annotator{'}s emotional and moral reactions to the language used to convey the message. Studies in Cognitive Science reveal a {`}foreign language effect{'}, whereby people take differing moral positions and perceive offensive phrases to be weaker in their second languages. Does this affect annotations as well? We conduct an experiment to investigate the impacts of (1) time and (2) different language conditions (English and German) on measurements of intra-annotator agreement in a hate speech labelling task. While we do not observe the expected lower stability in the different language condition, we find that overall agreement is significantly lower than is implicitly assumed in annotation tasks, which has important implications for dataset reproducibility in NLP.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="abercrombie-etal-2023-temporal">
    <titleInfo>
      <title>Temporal and Second Language Influence on Intra-Annotator Agreement and Stability in Hate Speech Labelling</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Gavin</namePart>
      <namePart type="family">Abercrombie</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dirk</namePart>
      <namePart type="family">Hovy</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Vinodkumar</namePart>
      <namePart type="family">Prabhakaran</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 17th Linguistic Annotation Workshop (LAW-XVII)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Jakob</namePart>
        <namePart type="family">Prange</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Annemarie</namePart>
        <namePart type="family">Friedrich</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Toronto, Canada</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Much work in natural language processing (NLP) relies on human annotation. Most of this work implicitly assumes that annotators’ labels are temporally stable, although in reality human judgements are rarely consistent over time. Because hate speech labelling is a subjective task, labels depend on annotators’ emotional and moral reactions to the language used to convey the message. Studies in cognitive science reveal a ‘foreign language effect’, whereby people take differing moral positions and perceive offensive phrases to be weaker in their second languages. Does this affect annotations as well? We conduct an experiment to investigate the impacts of (1) time and (2) different language conditions (English and German) on measurements of intra-annotator agreement in a hate speech labelling task. While we do not observe the expected lower stability in the different language condition, we find that overall agreement is significantly lower than is implicitly assumed in annotation tasks, which has important implications for dataset reproducibility in NLP.</abstract>
<identifier type="citekey">abercrombie-etal-2023-temporal</identifier>
<identifier type="doi">10.18653/v1/2023.law-1.10</identifier>
<location>
<url>https://aclanthology.org/2023.law-1.10</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>96</start>
<end>103</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Temporal and Second Language Influence on Intra-Annotator Agreement and Stability in Hate Speech Labelling
%A Abercrombie, Gavin
%A Hovy, Dirk
%A Prabhakaran, Vinodkumar
%Y Prange, Jakob
%Y Friedrich, Annemarie
%S Proceedings of the 17th Linguistic Annotation Workshop (LAW-XVII)
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F abercrombie-etal-2023-temporal
%X Much work in natural language processing (NLP) relies on human annotation. Most of this work implicitly assumes that annotators’ labels are temporally stable, although in reality human judgements are rarely consistent over time. Because hate speech labelling is a subjective task, labels depend on annotators’ emotional and moral reactions to the language used to convey the message. Studies in cognitive science reveal a ‘foreign language effect’, whereby people take differing moral positions and perceive offensive phrases to be weaker in their second languages. Does this affect annotations as well? We conduct an experiment to investigate the impacts of (1) time and (2) different language conditions (English and German) on measurements of intra-annotator agreement in a hate speech labelling task. While we do not observe the expected lower stability in the different language condition, we find that overall agreement is significantly lower than is implicitly assumed in annotation tasks, which has important implications for dataset reproducibility in NLP.
%R 10.18653/v1/2023.law-1.10
%U https://aclanthology.org/2023.law-1.10
%U https://doi.org/10.18653/v1/2023.law-1.10
%P 96-103
Markdown (Informal)
[Temporal and Second Language Influence on Intra-Annotator Agreement and Stability in Hate Speech Labelling](https://aclanthology.org/2023.law-1.10) (Abercrombie et al., LAW 2023)
ACL
Gavin Abercrombie, Dirk Hovy, and Vinodkumar Prabhakaran. 2023. Temporal and Second Language Influence on Intra-Annotator Agreement and Stability in Hate Speech Labelling. In Proceedings of the 17th Linguistic Annotation Workshop (LAW-XVII), pages 96–103, Toronto, Canada. Association for Computational Linguistics.