@inproceedings{sahoo-etal-2022-detecting,
title = "Detecting Unintended Social Bias in Toxic Language Datasets",
author = "Sahoo, Nihar and
Gupta, Himanshu and
Bhattacharyya, Pushpak",
editor = "Fokkens, Antske and
Srikumar, Vivek",
booktitle = "Proceedings of the 26th Conference on Computational Natural Language Learning (CoNLL)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.conll-1.10",
doi = "10.18653/v1/2022.conll-1.10",
pages = "132--143",
abstract = "With the rise of online hate speech, automatic detection of Hate Speech, Offensive texts as a natural language processing task is getting popular. However, very little research has been done to detect unintended social bias from these toxic language datasets. This paper introduces a new dataset ToxicBias curated from the existing dataset of Kaggle competition named {``}Jigsaw Unintended Bias in Toxicity Classification{''}. We aim to detect social biases, their categories, and targeted groups. The dataset contains instances annotated for five different bias categories, viz., gender, race/ethnicity, religion, political, and LGBTQ. We train transformer-based models using our curated datasets and report baseline performance for bias identification, target generation, and bias implications. Model biases and their mitigation are also discussed in detail. Our study motivates a systematic extraction of social bias data from toxic language datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sahoo-etal-2022-detecting">
<titleInfo>
<title>Detecting Unintended Social Bias in Toxic Language Datasets</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nihar</namePart>
<namePart type="family">Sahoo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Himanshu</namePart>
<namePart type="family">Gupta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pushpak</namePart>
<namePart type="family">Bhattacharyya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 26th Conference on Computational Natural Language Learning (CoNLL)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Antske</namePart>
<namePart type="family">Fokkens</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>With the rise of online hate speech, the automatic detection of hate speech and offensive text has become a popular natural language processing task. However, very little research has been done on detecting the unintended social bias present in these toxic language datasets. This paper introduces ToxicBias, a new dataset curated from the data of the Kaggle competition “Jigsaw Unintended Bias in Toxicity Classification”. We aim to detect social biases, their categories, and targeted groups. The dataset contains instances annotated for five bias categories, namely gender, race/ethnicity, religion, political, and LGBTQ. We train transformer-based models on our curated dataset and report baseline performance for bias identification, target generation, and bias implications. Model biases and their mitigation are also discussed in detail. Our study motivates the systematic extraction of social bias data from toxic language datasets.</abstract>
<identifier type="citekey">sahoo-etal-2022-detecting</identifier>
<identifier type="doi">10.18653/v1/2022.conll-1.10</identifier>
<location>
<url>https://aclanthology.org/2022.conll-1.10</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>132</start>
<end>143</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Detecting Unintended Social Bias in Toxic Language Datasets
%A Sahoo, Nihar
%A Gupta, Himanshu
%A Bhattacharyya, Pushpak
%Y Fokkens, Antske
%Y Srikumar, Vivek
%S Proceedings of the 26th Conference on Computational Natural Language Learning (CoNLL)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F sahoo-etal-2022-detecting
%X With the rise of online hate speech, the automatic detection of hate speech and offensive text has become a popular natural language processing task. However, very little research has been done on detecting the unintended social bias present in these toxic language datasets. This paper introduces ToxicBias, a new dataset curated from the data of the Kaggle competition “Jigsaw Unintended Bias in Toxicity Classification”. We aim to detect social biases, their categories, and targeted groups. The dataset contains instances annotated for five bias categories, namely gender, race/ethnicity, religion, political, and LGBTQ. We train transformer-based models on our curated dataset and report baseline performance for bias identification, target generation, and bias implications. Model biases and their mitigation are also discussed in detail. Our study motivates the systematic extraction of social bias data from toxic language datasets.
%R 10.18653/v1/2022.conll-1.10
%U https://aclanthology.org/2022.conll-1.10
%U https://doi.org/10.18653/v1/2022.conll-1.10
%P 132-143
Markdown (Informal)
[Detecting Unintended Social Bias in Toxic Language Datasets](https://aclanthology.org/2022.conll-1.10) (Sahoo et al., CoNLL 2022)

ACL
Nihar Sahoo, Himanshu Gupta, and Pushpak Bhattacharyya. 2022. Detecting Unintended Social Bias in Toxic Language Datasets. In Proceedings of the 26th Conference on Computational Natural Language Learning (CoNLL), pages 132–143, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.