@inproceedings{herold-etal-2022-applying,
title = "Applying the Stereotype Content Model to assess disability bias in popular pre-trained {NLP} models underlying {AI}-based assistive technologies",
author = "Herold, Brienna and
Waller, James and
Kushalnagar, Raja",
editor = "Ebling, Sarah and
Prud{'}hommeaux, Emily and
Vaidyanathan, Preethi",
booktitle = "Ninth Workshop on Speech and Language Processing for Assistive Technologies (SLPAT-2022)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.slpat-1.8",
doi = "10.18653/v1/2022.slpat-1.8",
pages = "58--65",
abstract = "A stereotype is a positive or negative, generalized, and often widely shared belief about the attributes of certain groups of people, such as people with sensory disabilities. If stereotypes manifest in assistive technologies used by deaf or blind people, they can harm the user in a number of ways, especially considering the vulnerable nature of the target population. AI models underlying assistive technologies have been shown to contain biased stereotypes, including racial, gender, and disability biases. We build on this work to present a psychology-based stereotype assessment of the representation of disability, deafness, and blindness in BERT using the Stereotype Content Model. We show that BERT contains disability bias, and that this bias differs along established stereotype dimensions.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="herold-etal-2022-applying">
<titleInfo>
<title>Applying the Stereotype Content Model to assess disability bias in popular pre-trained NLP models underlying AI-based assistive technologies</title>
</titleInfo>
<name type="personal">
<namePart type="given">Brienna</namePart>
<namePart type="family">Herold</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">James</namePart>
<namePart type="family">Waller</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Raja</namePart>
<namePart type="family">Kushalnagar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Ninth Workshop on Speech and Language Processing for Assistive Technologies (SLPAT-2022)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sarah</namePart>
<namePart type="family">Ebling</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Emily</namePart>
<namePart type="family">Prud’hommeaux</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Preethi</namePart>
<namePart type="family">Vaidyanathan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>A stereotype is a positive or negative, generalized, and often widely shared belief about the attributes of certain groups of people, such as people with sensory disabilities. If stereotypes manifest in assistive technologies used by deaf or blind people, they can harm the user in a number of ways, especially considering the vulnerable nature of the target population. AI models underlying assistive technologies have been shown to contain biased stereotypes, including racial, gender, and disability biases. We build on this work to present a psychology-based stereotype assessment of the representation of disability, deafness, and blindness in BERT using the Stereotype Content Model. We show that BERT contains disability bias, and that this bias differs along established stereotype dimensions.</abstract>
<identifier type="citekey">herold-etal-2022-applying</identifier>
<identifier type="doi">10.18653/v1/2022.slpat-1.8</identifier>
<location>
<url>https://aclanthology.org/2022.slpat-1.8</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>58</start>
<end>65</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Applying the Stereotype Content Model to assess disability bias in popular pre-trained NLP models underlying AI-based assistive technologies
%A Herold, Brienna
%A Waller, James
%A Kushalnagar, Raja
%Y Ebling, Sarah
%Y Prud’hommeaux, Emily
%Y Vaidyanathan, Preethi
%S Ninth Workshop on Speech and Language Processing for Assistive Technologies (SLPAT-2022)
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F herold-etal-2022-applying
%X A stereotype is a positive or negative, generalized, and often widely shared belief about the attributes of certain groups of people, such as people with sensory disabilities. If stereotypes manifest in assistive technologies used by deaf or blind people, they can harm the user in a number of ways, especially considering the vulnerable nature of the target population. AI models underlying assistive technologies have been shown to contain biased stereotypes, including racial, gender, and disability biases. We build on this work to present a psychology-based stereotype assessment of the representation of disability, deafness, and blindness in BERT using the Stereotype Content Model. We show that BERT contains disability bias, and that this bias differs along established stereotype dimensions.
%R 10.18653/v1/2022.slpat-1.8
%U https://aclanthology.org/2022.slpat-1.8
%U https://doi.org/10.18653/v1/2022.slpat-1.8
%P 58-65
Markdown (Informal)
[Applying the Stereotype Content Model to assess disability bias in popular pre-trained NLP models underlying AI-based assistive technologies](https://aclanthology.org/2022.slpat-1.8) (Herold et al., SLPAT 2022)
ACL