@inproceedings{rudinger-etal-2017-social,
    title = "Social Bias in Elicited Natural Language Inferences",
    author = "Rudinger, Rachel and
      May, Chandler and
      Van Durme, Benjamin",
    editor = "Hovy, Dirk and
      Spruit, Shannon and
      Mitchell, Margaret and
      Bender, Emily M. and
      Strube, Michael and
      Wallach, Hanna",
    booktitle = "Proceedings of the First {ACL} Workshop on Ethics in Natural Language Processing",
    month = apr,
    year = "2017",
    address = "Valencia, Spain",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-1609",
    doi = "10.18653/v1/W17-1609",
    pages = "74--79",
    abstract = "We analyze the Stanford Natural Language Inference (SNLI) corpus in an investigation of bias and stereotyping in NLP data. The SNLI human-elicitation protocol makes it prone to amplifying bias and stereotypical associations, which we demonstrate statistically (using pointwise mutual information) and with qualitative examples.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="rudinger-etal-2017-social">
    <titleInfo>
      <title>Social Bias in Elicited Natural Language Inferences</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Rachel</namePart>
      <namePart type="family">Rudinger</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Chandler</namePart>
      <namePart type="family">May</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Benjamin</namePart>
      <namePart type="family">Van Durme</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2017-04</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the First ACL Workshop on Ethics in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Dirk</namePart>
        <namePart type="family">Hovy</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Shannon</namePart>
        <namePart type="family">Spruit</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Margaret</namePart>
        <namePart type="family">Mitchell</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Emily</namePart>
        <namePart type="given">M</namePart>
        <namePart type="family">Bender</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Michael</namePart>
        <namePart type="family">Strube</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Hanna</namePart>
        <namePart type="family">Wallach</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Valencia, Spain</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We analyze the Stanford Natural Language Inference (SNLI) corpus in an investigation of bias and stereotyping in NLP data. The SNLI human-elicitation protocol makes it prone to amplifying bias and stereotypical associations, which we demonstrate statistically (using pointwise mutual information) and with qualitative examples.</abstract>
    <identifier type="citekey">rudinger-etal-2017-social</identifier>
    <identifier type="doi">10.18653/v1/W17-1609</identifier>
    <location>
      <url>https://aclanthology.org/W17-1609</url>
    </location>
    <part>
      <date>2017-04</date>
      <extent unit="page">
        <start>74</start>
        <end>79</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Social Bias in Elicited Natural Language Inferences
%A Rudinger, Rachel
%A May, Chandler
%A Van Durme, Benjamin
%Y Hovy, Dirk
%Y Spruit, Shannon
%Y Mitchell, Margaret
%Y Bender, Emily M.
%Y Strube, Michael
%Y Wallach, Hanna
%S Proceedings of the First ACL Workshop on Ethics in Natural Language Processing
%D 2017
%8 April
%I Association for Computational Linguistics
%C Valencia, Spain
%F rudinger-etal-2017-social
%X We analyze the Stanford Natural Language Inference (SNLI) corpus in an investigation of bias and stereotyping in NLP data. The SNLI human-elicitation protocol makes it prone to amplifying bias and stereotypical associations, which we demonstrate statistically (using pointwise mutual information) and with qualitative examples.
%R 10.18653/v1/W17-1609
%U https://aclanthology.org/W17-1609
%U https://doi.org/10.18653/v1/W17-1609
%P 74-79
Markdown (Informal)
[Social Bias in Elicited Natural Language Inferences](https://aclanthology.org/W17-1609) (Rudinger et al., EthNLP 2017)

ACL
Rachel Rudinger, Chandler May, and Benjamin Van Durme. 2017. Social Bias in Elicited Natural Language Inferences. In Proceedings of the First ACL Workshop on Ethics in Natural Language Processing, pages 74–79, Valencia, Spain. Association for Computational Linguistics.
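The abstract notes that the paper quantifies stereotypical associations in SNLI statistically with pointwise mutual information (PMI). As a minimal sketch only, not the authors' code, PMI between a premise category and an elicited hypothesis word can be estimated from co-occurrence counts as log2(p(c, w) / (p(c) p(w))); the category/word pairs below are made-up toy data for illustration.

```python
import math
from collections import Counter

def pmi(pairs):
    """PMI for every (category, word) pair observed in `pairs`,
    estimated from raw co-occurrence counts:
    PMI(c, w) = log2( p(c, w) / (p(c) * p(w)) ).
    """
    joint = Counter(pairs)                  # counts of (category, word) pairs
    cat = Counter(c for c, _ in pairs)      # marginal counts of categories
    word = Counter(w for _, w in pairs)     # marginal counts of words
    n = len(pairs)
    return {
        (c, w): math.log2((cw / n) / ((cat[c] / n) * (word[w] / n)))
        for (c, w), cw in joint.items()
    }

# Toy usage with hypothetical (premise category, hypothesis word) pairs.
pairs = [
    ("woman", "cooking"), ("woman", "running"),
    ("man", "running"), ("man", "driving"),
    ("woman", "cooking"), ("man", "running"),
]
for (c, w), score in sorted(pmi(pairs).items(), key=lambda kv: -kv[1]):
    print(f"PMI({c!r}, {w!r}) = {score:.3f}")
```

Pairs whose PMI is well above zero co-occur more often than their marginal frequencies would predict, which is the kind of skew the paper reports for elicited hypotheses.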