@inproceedings{wich-etal-2020-impact,
title = "Impact of Politically Biased Data on Hate Speech Classification",
author = "Wich, Maximilian and
Bauer, Jan and
Groh, Georg",
editor = "Akiwowo, Seyi and
Vidgen, Bertie and
Prabhakaran, Vinodkumar and
Waseem, Zeerak",
booktitle = "Proceedings of the Fourth Workshop on Online Abuse and Harms",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.alw-1.7",
doi = "10.18653/v1/2020.alw-1.7",
pages = "54--64",
abstract = "One challenge that social media platforms are facing nowadays is hate speech. Hence, automatic hate speech detection has been increasingly researched in recent years – in particular with the rise of deep learning. A problem of these models is their vulnerability to undesirable bias in training data. We investigate the impact of political bias on hate speech classification by constructing three politically-biased data sets (left-wing, right-wing, politically neutral) and compare the performance of classifiers trained on them. We show that (1) political bias negatively impairs the performance of hate speech classifiers and (2) an explainable machine learning model can help to visualize such bias within the training data. The results show that political bias in training data has an impact on hate speech classification and can become a serious issue.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wich-etal-2020-impact">
<titleInfo>
<title>Impact of Politically Biased Data on Hate Speech Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Maximilian</namePart>
<namePart type="family">Wich</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jan</namePart>
<namePart type="family">Bauer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Georg</namePart>
<namePart type="family">Groh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Fourth Workshop on Online Abuse and Harms</title>
</titleInfo>
<name type="personal">
<namePart type="given">Seyi</namePart>
<namePart type="family">Akiwowo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bertie</namePart>
<namePart type="family">Vidgen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vinodkumar</namePart>
<namePart type="family">Prabhakaran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zeerak</namePart>
<namePart type="family">Waseem</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>One challenge that social media platforms are facing nowadays is hate speech. Hence, automatic hate speech detection has been increasingly researched in recent years – in particular with the rise of deep learning. A problem of these models is their vulnerability to undesirable bias in training data. We investigate the impact of political bias on hate speech classification by constructing three politically-biased data sets (left-wing, right-wing, politically neutral) and compare the performance of classifiers trained on them. We show that (1) political bias negatively impairs the performance of hate speech classifiers and (2) an explainable machine learning model can help to visualize such bias within the training data. The results show that political bias in training data has an impact on hate speech classification and can become a serious issue.</abstract>
<identifier type="citekey">wich-etal-2020-impact</identifier>
<identifier type="doi">10.18653/v1/2020.alw-1.7</identifier>
<location>
<url>https://aclanthology.org/2020.alw-1.7</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>54</start>
<end>64</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Impact of Politically Biased Data on Hate Speech Classification
%A Wich, Maximilian
%A Bauer, Jan
%A Groh, Georg
%Y Akiwowo, Seyi
%Y Vidgen, Bertie
%Y Prabhakaran, Vinodkumar
%Y Waseem, Zeerak
%S Proceedings of the Fourth Workshop on Online Abuse and Harms
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F wich-etal-2020-impact
%X One challenge that social media platforms are facing nowadays is hate speech. Hence, automatic hate speech detection has been increasingly researched in recent years – in particular with the rise of deep learning. A problem of these models is their vulnerability to undesirable bias in training data. We investigate the impact of political bias on hate speech classification by constructing three politically-biased data sets (left-wing, right-wing, politically neutral) and compare the performance of classifiers trained on them. We show that (1) political bias negatively impairs the performance of hate speech classifiers and (2) an explainable machine learning model can help to visualize such bias within the training data. The results show that political bias in training data has an impact on hate speech classification and can become a serious issue.
%R 10.18653/v1/2020.alw-1.7
%U https://aclanthology.org/2020.alw-1.7
%U https://doi.org/10.18653/v1/2020.alw-1.7
%P 54-64
Markdown (Informal)
[Impact of Politically Biased Data on Hate Speech Classification](https://aclanthology.org/2020.alw-1.7) (Wich et al., ALW 2020)
ACL