BibTeX
@inproceedings{kumar-etal-2020-fair,
    title = "Fair Embedding Engine: A Library for Analyzing and Mitigating Gender Bias in Word Embeddings",
    author = "Kumar, Vaibhav and
      Bhotia, Tenzin and
      Kumar, Vaibhav",
    editor = "Park, Eunjeong L. and
      Hagiwara, Masato and
      Milajevs, Dmitrijs and
      Liu, Nelson F. and
      Chauhan, Geeticka and
      Tan, Liling",
    booktitle = "Proceedings of Second Workshop for NLP Open Source Software (NLP-OSS)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.nlposs-1.5",
    doi = "10.18653/v1/2020.nlposs-1.5",
    pages = "26--31",
    abstract = "Non-contextual word embedding models have been shown to inherit human-like stereotypical biases of gender, race and religion from the training corpora. To counter this issue, a large body of research has emerged which aims to mitigate these biases while keeping the syntactic and semantic utility of embeddings intact. This paper describes Fair Embedding Engine (FEE), a library for analysing and mitigating gender bias in word embeddings. FEE combines various state of the art techniques for quantifying, visualising and mitigating gender bias in word embeddings under a standard abstraction. FEE will aid practitioners in fast track analysis of existing debiasing methods on their embedding models. Further, it will allow rapid prototyping of new methods by evaluating their performance on a suite of standard metrics.",
}
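To make the abstract's "quantifying ... gender bias" concrete, here is a minimal, hypothetical Python sketch of projection-based bias scoring in the spirit of the direct-bias measure of Bolukbasi et al. (2016), one of the standard techniques a library like FEE wraps. This is not FEE's actual API; the toy `embeddings` table and every name below are assumptions for illustration.

```python
# Illustrative sketch only: projection-based gender bias scoring
# (direct bias, Bolukbasi et al. 2016). NOT FEE's API; the toy
# `embeddings` table and all names here are assumed for illustration.
import numpy as np

embeddings = {  # toy 3-d vectors; real use would load word2vec/GloVe
    "he":     np.array([0.9, 0.1, 0.0]),
    "she":    np.array([-0.9, 0.1, 0.0]),
    "doctor": np.array([0.4, 0.5, 0.3]),
    "nurse":  np.array([-0.5, 0.4, 0.2]),
}

def unit(v):
    """Scale a vector to unit length."""
    return v / np.linalg.norm(v)

# Gender direction from a definitional pair, as in hard-debias pipelines.
gender_direction = unit(embeddings["he"] - embeddings["she"])

def direct_bias(word):
    """Cosine of a word with the gender direction; near 0 means neutral."""
    return float(np.dot(unit(embeddings[word]), gender_direction))

for w in ("doctor", "nurse"):
    print(f"{w}: {direct_bias(w):+.3f}")  # sign indicates direction of lean
```

A library such as FEE puts metrics like this behind one interface so the effect of different debiasing methods can be compared on the same scores.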
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kumar-etal-2020-fair">
<titleInfo>
<title>Fair Embedding Engine: A Library for Analyzing and Mitigating Gender Bias in Word Embeddings</title>
</titleInfo>
<name type="personal">
<namePart type="given">Vaibhav</namePart>
<namePart type="family">Kumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tenzin</namePart>
<namePart type="family">Bhotia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of Second Workshop for NLP Open Source Software (NLP-OSS)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eunjeong</namePart>
<namePart type="given">L</namePart>
<namePart type="family">Park</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masato</namePart>
<namePart type="family">Hagiwara</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dmitrijs</namePart>
<namePart type="family">Milajevs</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nelson</namePart>
<namePart type="given">F</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Geeticka</namePart>
<namePart type="family">Chauhan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Liling</namePart>
<namePart type="family">Tan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Non-contextual word embedding models have been shown to inherit human-like stereotypical biases of gender, race and religion from the training corpora. To counter this issue, a large body of research has emerged which aims to mitigate these biases while keeping the syntactic and semantic utility of embeddings intact. This paper describes Fair Embedding Engine (FEE), a library for analysing and mitigating gender bias in word embeddings. FEE combines various state of the art techniques for quantifying, visualising and mitigating gender bias in word embeddings under a standard abstraction. FEE will aid practitioners in fast track analysis of existing debiasing methods on their embedding models. Further, it will allow rapid prototyping of new methods by evaluating their performance on a suite of standard metrics.</abstract>
<identifier type="citekey">kumar-etal-2020-fair</identifier>
<identifier type="doi">10.18653/v1/2020.nlposs-1.5</identifier>
<location>
<url>https://aclanthology.org/2020.nlposs-1.5</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>26</start>
<end>31</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Fair Embedding Engine: A Library for Analyzing and Mitigating Gender Bias in Word Embeddings
%A Kumar, Vaibhav
%A Bhotia, Tenzin
%A Kumar, Vaibhav
%Y Park, Eunjeong L.
%Y Hagiwara, Masato
%Y Milajevs, Dmitrijs
%Y Liu, Nelson F.
%Y Chauhan, Geeticka
%Y Tan, Liling
%S Proceedings of Second Workshop for NLP Open Source Software (NLP-OSS)
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F kumar-etal-2020-fair
%X Non-contextual word embedding models have been shown to inherit human-like stereotypical biases of gender, race and religion from the training corpora. To counter this issue, a large body of research has emerged which aims to mitigate these biases while keeping the syntactic and semantic utility of embeddings intact. This paper describes Fair Embedding Engine (FEE), a library for analysing and mitigating gender bias in word embeddings. FEE combines various state of the art techniques for quantifying, visualising and mitigating gender bias in word embeddings under a standard abstraction. FEE will aid practitioners in fast track analysis of existing debiasing methods on their embedding models. Further, it will allow rapid prototyping of new methods by evaluating their performance on a suite of standard metrics.
%R 10.18653/v1/2020.nlposs-1.5
%U https://aclanthology.org/2020.nlposs-1.5
%U https://doi.org/10.18653/v1/2020.nlposs-1.5
%P 26-31
Markdown (Informal)
[Fair Embedding Engine: A Library for Analyzing and Mitigating Gender Bias in Word Embeddings](https://aclanthology.org/2020.nlposs-1.5) (Kumar et al., NLPOSS 2020)
ACL
Vaibhav Kumar, Tenzin Bhotia, and Vaibhav Kumar. 2020. Fair Embedding Engine: A Library for Analyzing and Mitigating Gender Bias in Word Embeddings. In Proceedings of Second Workshop for NLP Open Source Software (NLP-OSS), pages 26–31, Online. Association for Computational Linguistics.
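For the "mitigating" half of the abstract, the sketch below shows the neutralize step of hard debiasing (Bolukbasi et al., 2016): removing a vector's component along the gender direction. Again, this is a generic illustration under assumed names, not FEE's implementation.

```python
# Illustrative sketch of the neutralize step from hard debiasing
# (Bolukbasi et al. 2016); a generic example, not FEE's implementation.
import numpy as np

def neutralize(vec, direction):
    """Remove the component of `vec` that lies along `direction`."""
    direction = direction / np.linalg.norm(direction)  # ensure unit norm
    return vec - np.dot(vec, direction) * direction

# After neutralizing, the projection onto the direction is ~0.
g = np.array([1.0, 0.0, 0.0])   # stand-in gender direction
v = np.array([0.4, 0.5, 0.3])   # stand-in vector for a profession word
v_debiased = neutralize(v, g)
print(np.dot(v_debiased, g))    # ~0.0: gender component removed
```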