@inproceedings{prost-etal-2019-debiasing,
title = "Debiasing Embeddings for Reduced Gender Bias in Text Classification",
author = "Prost, Flavien and
Thain, Nithum and
Bolukbasi, Tolga",
editor = "Costa-juss{\`a}, Marta R. and
Hardmeier, Christian and
Radford, Will and
Webster, Kellie",
booktitle = "Proceedings of the First Workshop on Gender Bias in Natural Language Processing",
month = aug,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W19-3810",
doi = "10.18653/v1/W19-3810",
pages = "69--75",
abstract = "(Bolukbasi et al., 2016) demonstrated that pretrained word embeddings can inherit gender bias from the data they were trained on. We investigate how this bias affects downstream classification tasks, using the case study of occupation classification (De-Arteaga et al., 2019). We show that traditional techniques for debiasing embeddings can actually worsen the bias of the downstream classifier by providing a less noisy channel for communicating gender information. With a relatively minor adjustment, however, we show how these same techniques can be used to simultaneously reduce bias and maintain high classification accuracy.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="prost-etal-2019-debiasing">
<titleInfo>
<title>Debiasing Embeddings for Reduced Gender Bias in Text Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Flavien</namePart>
<namePart type="family">Prost</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nithum</namePart>
<namePart type="family">Thain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tolga</namePart>
<namePart type="family">Bolukbasi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Gender Bias in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Marta</namePart>
<namePart type="given">R</namePart>
<namePart type="family">Costa-jussà</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christian</namePart>
<namePart type="family">Hardmeier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Will</namePart>
<namePart type="family">Radford</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kellie</namePart>
<namePart type="family">Webster</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Florence, Italy</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>(Bolukbasi et al., 2016) demonstrated that pretrained word embeddings can inherit gender bias from the data they were trained on. We investigate how this bias affects downstream classification tasks, using the case study of occupation classification (De-Arteaga et al., 2019). We show that traditional techniques for debiasing embeddings can actually worsen the bias of the downstream classifier by providing a less noisy channel for communicating gender information. With a relatively minor adjustment, however, we show how these same techniques can be used to simultaneously reduce bias and maintain high classification accuracy.</abstract>
<identifier type="citekey">prost-etal-2019-debiasing</identifier>
<identifier type="doi">10.18653/v1/W19-3810</identifier>
<location>
<url>https://aclanthology.org/W19-3810</url>
</location>
<part>
<date>2019-08</date>
<extent unit="page">
<start>69</start>
<end>75</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Debiasing Embeddings for Reduced Gender Bias in Text Classification
%A Prost, Flavien
%A Thain, Nithum
%A Bolukbasi, Tolga
%Y Costa-jussà, Marta R.
%Y Hardmeier, Christian
%Y Radford, Will
%Y Webster, Kellie
%S Proceedings of the First Workshop on Gender Bias in Natural Language Processing
%D 2019
%8 August
%I Association for Computational Linguistics
%C Florence, Italy
%F prost-etal-2019-debiasing
%X (Bolukbasi et al., 2016) demonstrated that pretrained word embeddings can inherit gender bias from the data they were trained on. We investigate how this bias affects downstream classification tasks, using the case study of occupation classification (De-Arteaga et al., 2019). We show that traditional techniques for debiasing embeddings can actually worsen the bias of the downstream classifier by providing a less noisy channel for communicating gender information. With a relatively minor adjustment, however, we show how these same techniques can be used to simultaneously reduce bias and maintain high classification accuracy.
%R 10.18653/v1/W19-3810
%U https://aclanthology.org/W19-3810
%U https://doi.org/10.18653/v1/W19-3810
%P 69-75
Markdown (Informal)
[Debiasing Embeddings for Reduced Gender Bias in Text Classification](https://aclanthology.org/W19-3810) (Prost et al., GeBNLP 2019)
ACL
Flavien Prost, Nithum Thain, and Tolga Bolukbasi. 2019. Debiasing Embeddings for Reduced Gender Bias in Text Classification. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 69–75, Florence, Italy. Association for Computational Linguistics.
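
For readers unfamiliar with the "traditional techniques for debiasing embeddings" the abstract refers to, the following is a minimal sketch of the neutralize step from hard debiasing (Bolukbasi et al., 2016): project the gender direction out of a word vector. This is an illustration, not the paper's own code; the helper names (`gender_direction`, `neutralize`) and the single he/she pair are assumptions for brevity (the original method estimates the direction via PCA over several definitional pairs), and the random vectors merely stand in for pretrained embeddings.

```python
import numpy as np

def gender_direction(emb: dict) -> np.ndarray:
    """Estimate a gender direction from the 'he'/'she' pair (illustrative only)."""
    g = emb["he"] - emb["she"]
    return g / np.linalg.norm(g)

def neutralize(w: np.ndarray, g: np.ndarray) -> np.ndarray:
    """Remove the component of w along the unit gender direction g."""
    return w - np.dot(w, g) * g

# Toy example: random 50-dimensional vectors standing in for pretrained embeddings.
rng = np.random.default_rng(0)
emb = {word: rng.standard_normal(50) for word in ["he", "she", "nurse"]}
g = gender_direction(emb)
debiased = neutralize(emb["nurse"], g)
print(np.dot(debiased, g))  # ~0.0: no remaining component along g
```

The paper's contribution concerns how projections of this kind interact with a downstream occupation classifier: applied naively they can concentrate rather than remove gender information, while a minor adjustment lets the same machinery reduce downstream bias without sacrificing accuracy.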