@inproceedings{kanclerz-etal-2022-ground,
title = "What If Ground Truth Is Subjective? Personalized Deep Neural Hate Speech Detection",
author = "Kanclerz, Kamil and
Gruza, Marcin and
Karanowski, Konrad and
Bielaniewicz, Julita and
Mi{\l}kowski, Piotr and
Koco{\'n}, Jan and
Kazienko, Przemys{\l}aw",
booktitle = "Proceedings of the 1st Workshop on Perspectivist Approaches to NLP @LREC2022",
month = jun,
year = "2022",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://aclanthology.org/2022.nlperspectives-1.6",
pages = "37--45",
abstract = "A unified gold standard commonly exploited in natural language processing (NLP) tasks requires high inter-annotator agreement. However, there are many subjective problems that should respect users' individual points of view. Therefore in this paper, we evaluate three different personalized methods on the task of hate speech detection. The user-centered techniques are compared to the generalizing baseline approach. We conduct our experiments on three datasets including single-task and multi-task hate speech detection. For validation purposes, we introduce a new data-split strategy, preventing data leakage between training and testing. In order to better understand the model behavior for individual users, we carried out personalized ablation studies. Our experiments revealed that all models leveraging user preferences in any case provide significantly better results than most frequently used generalized approaches. This supports our overall observation that personalized models should always be considered in all subjective NLP tasks, including hate speech detection.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kanclerz-etal-2022-ground">
<titleInfo>
<title>What If Ground Truth Is Subjective? Personalized Deep Neural Hate Speech Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kamil</namePart>
<namePart type="family">Kanclerz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marcin</namePart>
<namePart type="family">Gruza</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Konrad</namePart>
<namePart type="family">Karanowski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julita</namePart>
<namePart type="family">Bielaniewicz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Piotr</namePart>
<namePart type="family">Miłkowski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jan</namePart>
<namePart type="family">Kocoń</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Przemysław</namePart>
<namePart type="family">Kazienko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Perspectivist Approaches to NLP @LREC2022</title>
</titleInfo>
<originInfo>
<publisher>European Language Resources Association</publisher>
<place>
<placeTerm type="text">Marseille, France</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>A unified gold standard commonly exploited in natural language processing (NLP) tasks requires high inter-annotator agreement. However, there are many subjective problems that should respect users' individual points of view. Therefore in this paper, we evaluate three different personalized methods on the task of hate speech detection. The user-centered techniques are compared to the generalizing baseline approach. We conduct our experiments on three datasets including single-task and multi-task hate speech detection. For validation purposes, we introduce a new data-split strategy, preventing data leakage between training and testing. In order to better understand the model behavior for individual users, we carried out personalized ablation studies. Our experiments revealed that all models leveraging user preferences in any case provide significantly better results than most frequently used generalized approaches. This supports our overall observation that personalized models should always be considered in all subjective NLP tasks, including hate speech detection.</abstract>
<identifier type="citekey">kanclerz-etal-2022-ground</identifier>
<location>
<url>https://aclanthology.org/2022.nlperspectives-1.6</url>
</location>
<part>
<date>2022-06</date>
<extent unit="page">
<start>37</start>
<end>45</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T What If Ground Truth Is Subjective? Personalized Deep Neural Hate Speech Detection
%A Kanclerz, Kamil
%A Gruza, Marcin
%A Karanowski, Konrad
%A Bielaniewicz, Julita
%A Miłkowski, Piotr
%A Kocoń, Jan
%A Kazienko, Przemysław
%S Proceedings of the 1st Workshop on Perspectivist Approaches to NLP @LREC2022
%D 2022
%8 June
%I European Language Resources Association
%C Marseille, France
%F kanclerz-etal-2022-ground
%X A unified gold standard commonly exploited in natural language processing (NLP) tasks requires high inter-annotator agreement. However, there are many subjective problems that should respect users' individual points of view. Therefore in this paper, we evaluate three different personalized methods on the task of hate speech detection. The user-centered techniques are compared to the generalizing baseline approach. We conduct our experiments on three datasets including single-task and multi-task hate speech detection. For validation purposes, we introduce a new data-split strategy, preventing data leakage between training and testing. In order to better understand the model behavior for individual users, we carried out personalized ablation studies. Our experiments revealed that all models leveraging user preferences in any case provide significantly better results than most frequently used generalized approaches. This supports our overall observation that personalized models should always be considered in all subjective NLP tasks, including hate speech detection.
%U https://aclanthology.org/2022.nlperspectives-1.6
%P 37-45
Markdown (Informal)
[What If Ground Truth Is Subjective? Personalized Deep Neural Hate Speech Detection](https://aclanthology.org/2022.nlperspectives-1.6) (Kanclerz et al., NLPerspectives 2022)
ACL