BibTeX
@inproceedings{davidson-etal-2019-racial,
    title = "Racial Bias in Hate Speech and Abusive Language Detection Datasets",
    author = "Davidson, Thomas and
      Bhattacharya, Debasmita and
      Weber, Ingmar",
    editor = "Roberts, Sarah T. and
      Tetreault, Joel and
      Prabhakaran, Vinodkumar and
      Waseem, Zeerak",
    booktitle = "Proceedings of the Third Workshop on Abusive Language Online",
    month = aug,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-3504",
    doi = "10.18653/v1/W19-3504",
    pages = "25--35",
    abstract = "Technologies for abusive language detection are being developed and applied with little consideration of their potential biases. We examine racial bias in five different sets of Twitter data annotated for hate speech and abusive language. We train classifiers on these datasets and compare the predictions of these classifiers on tweets written in African-American English with those written in Standard American English. The results show evidence of systematic racial bias in all datasets, as classifiers trained on them tend to predict that tweets written in African-American English are abusive at substantially higher rates. If these abusive language detection systems are used in the field they will therefore have a disproportionate negative impact on African-American social media users. Consequently, these systems may discriminate against the groups who are often the targets of the abuse we are trying to detect.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="davidson-etal-2019-racial">
    <titleInfo>
      <title>Racial Bias in Hate Speech and Abusive Language Detection Datasets</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Thomas</namePart>
      <namePart type="family">Davidson</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Debasmita</namePart>
      <namePart type="family">Bhattacharya</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ingmar</namePart>
      <namePart type="family">Weber</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Third Workshop on Abusive Language Online</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Sarah</namePart>
        <namePart type="given">T</namePart>
        <namePart type="family">Roberts</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Joel</namePart>
        <namePart type="family">Tetreault</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vinodkumar</namePart>
        <namePart type="family">Prabhakaran</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Zeerak</namePart>
        <namePart type="family">Waseem</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Florence, Italy</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Technologies for abusive language detection are being developed and applied with little consideration of their potential biases. We examine racial bias in five different sets of Twitter data annotated for hate speech and abusive language. We train classifiers on these datasets and compare the predictions of these classifiers on tweets written in African-American English with those written in Standard American English. The results show evidence of systematic racial bias in all datasets, as classifiers trained on them tend to predict that tweets written in African-American English are abusive at substantially higher rates. If these abusive language detection systems are used in the field they will therefore have a disproportionate negative impact on African-American social media users. Consequently, these systems may discriminate against the groups who are often the targets of the abuse we are trying to detect.</abstract>
    <identifier type="citekey">davidson-etal-2019-racial</identifier>
    <identifier type="doi">10.18653/v1/W19-3504</identifier>
    <location>
      <url>https://aclanthology.org/W19-3504</url>
    </location>
    <part>
      <date>2019-08</date>
      <extent unit="page">
        <start>25</start>
        <end>35</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Racial Bias in Hate Speech and Abusive Language Detection Datasets
%A Davidson, Thomas
%A Bhattacharya, Debasmita
%A Weber, Ingmar
%Y Roberts, Sarah T.
%Y Tetreault, Joel
%Y Prabhakaran, Vinodkumar
%Y Waseem, Zeerak
%S Proceedings of the Third Workshop on Abusive Language Online
%D 2019
%8 August
%I Association for Computational Linguistics
%C Florence, Italy
%F davidson-etal-2019-racial
%X Technologies for abusive language detection are being developed and applied with little consideration of their potential biases. We examine racial bias in five different sets of Twitter data annotated for hate speech and abusive language. We train classifiers on these datasets and compare the predictions of these classifiers on tweets written in African-American English with those written in Standard American English. The results show evidence of systematic racial bias in all datasets, as classifiers trained on them tend to predict that tweets written in African-American English are abusive at substantially higher rates. If these abusive language detection systems are used in the field they will therefore have a disproportionate negative impact on African-American social media users. Consequently, these systems may discriminate against the groups who are often the targets of the abuse we are trying to detect.
%R 10.18653/v1/W19-3504
%U https://aclanthology.org/W19-3504
%U https://doi.org/10.18653/v1/W19-3504
%P 25-35
Markdown (Informal)
[Racial Bias in Hate Speech and Abusive Language Detection Datasets](https://aclanthology.org/W19-3504) (Davidson et al., ALW 2019)
ACL
Thomas Davidson, Debasmita Bhattacharya, and Ingmar Weber. 2019. [Racial Bias in Hate Speech and Abusive Language Detection Datasets](https://aclanthology.org/W19-3504). In *Proceedings of the Third Workshop on Abusive Language Online*, pages 25–35, Florence, Italy. Association for Computational Linguistics.