@inproceedings{venkit-etal-2022-study,
    title = "A Study of Implicit Bias in Pretrained Language Models against People with Disabilities",
    author = "Venkit, Pranav Narayanan and
      Srinath, Mukund and
      Wilson, Shomir",
    booktitle = "Proceedings of the 29th International Conference on Computational Linguistics",
    month = oct,
    year = "2022",
    address = "Gyeongju, Republic of Korea",
    publisher = "International Committee on Computational Linguistics",
    url = "https://aclanthology.org/2022.coling-1.113",
    pages = "1324--1332",
    abstract = "Pretrained language models (PLMs) have been shown to exhibit sociodemographic biases, such as against gender and race, raising concerns of downstream biases in language technologies. However, PLMs{'} biases against people with disabilities (PWDs) have received little attention, in spite of their potential to cause similar harms. Using perturbation sensitivity analysis, we test an assortment of popular word embedding-based and transformer-based PLMs and show significant biases against PWDs in all of them. The results demonstrate how models trained on large corpora widely favor ableist language.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="venkit-etal-2022-study">
<titleInfo>
<title>A Study of Implicit Bias in Pretrained Language Models against People with Disabilities</title>
</titleInfo>
<name type="personal">
<namePart type="given">Pranav</namePart>
<namePart type="given">Narayanan</namePart>
<namePart type="family">Venkit</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mukund</namePart>
<namePart type="family">Srinath</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shomir</namePart>
<namePart type="family">Wilson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 29th International Conference on Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>International Committee on Computational Linguistics</publisher>
<place>
<placeTerm type="text">Gyeongju, Republic of Korea</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Pretrained language models (PLMs) have been shown to exhibit sociodemographic biases, such as against gender and race, raising concerns of downstream biases in language technologies. However, PLMs’ biases against people with disabilities (PWDs) have received little attention, in spite of their potential to cause similar harms. Using perturbation sensitivity analysis, we test an assortment of popular word embedding-based and transformer-based PLMs and show significant biases against PWDs in all of them. The results demonstrate how models trained on large corpora widely favor ableist language.</abstract>
<identifier type="citekey">venkit-etal-2022-study</identifier>
<location>
<url>https://aclanthology.org/2022.coling-1.113</url>
</location>
<part>
<date>2022-10</date>
<extent unit="page">
<start>1324</start>
<end>1332</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Study of Implicit Bias in Pretrained Language Models against People with Disabilities
%A Venkit, Pranav Narayanan
%A Srinath, Mukund
%A Wilson, Shomir
%S Proceedings of the 29th International Conference on Computational Linguistics
%D 2022
%8 October
%I International Committee on Computational Linguistics
%C Gyeongju, Republic of Korea
%F venkit-etal-2022-study
%X Pretrained language models (PLMs) have been shown to exhibit sociodemographic biases, such as against gender and race, raising concerns of downstream biases in language technologies. However, PLMs’ biases against people with disabilities (PWDs) have received little attention, in spite of their potential to cause similar harms. Using perturbation sensitivity analysis, we test an assortment of popular word embedding-based and transformer-based PLMs and show significant biases against PWDs in all of them. The results demonstrate how models trained on large corpora widely favor ableist language.
%U https://aclanthology.org/2022.coling-1.113
%P 1324-1332
Markdown (Informal)
[A Study of Implicit Bias in Pretrained Language Models against People with Disabilities](https://aclanthology.org/2022.coling-1.113) (Venkit et al., COLING 2022)
ACL
Pranav Narayanan Venkit, Mukund Srinath, and Shomir Wilson. 2022. [A Study of Implicit Bias in Pretrained Language Models against People with Disabilities](https://aclanthology.org/2022.coling-1.113). In *Proceedings of the 29th International Conference on Computational Linguistics*, pages 1324–1332, Gyeongju, Republic of Korea. International Committee on Computational Linguistics.
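
For readers unfamiliar with the method named in the abstract, below is a minimal, illustrative sketch of perturbation sensitivity analysis in the general style of Prabhakaran et al. (2019): a referent term is swapped into fixed sentence templates and the resulting shift in a model's score is measured. The templates, the term list, and the off-the-shelf sentiment pipeline are assumptions for illustration only, not the specific models or data used in the paper.

```python
# Illustrative sketch of perturbation sensitivity analysis (general technique,
# not the paper's exact setup). Templates, terms, and scorer are placeholders.
from transformers import pipeline

# Sentence templates with a slot for the perturbed referent.
templates = [
    "I met {} at the party yesterday.",
    "{} moved into the apartment next door.",
]

# Neutral baseline referent vs. disability-related perturbations (illustrative).
baseline = "a person"
perturbations = ["a deaf person", "a blind person", "a person who uses a wheelchair"]

# Any sentence-level scorer can be probed; a sentiment classifier stands in here.
scorer = pipeline("sentiment-analysis")

def signed_score(text):
    # Map the classifier output to a signed score: +p for POSITIVE, -p for NEGATIVE.
    out = scorer(text)[0]
    return out["score"] if out["label"] == "POSITIVE" else -out["score"]

for template in templates:
    base = signed_score(template.format(baseline))
    for term in perturbations:
        shifted = signed_score(template.format(term))
        # The perturbation sensitivity is the score shift caused by the swap;
        # consistently negative shifts indicate bias against the perturbed group.
        print(f"{template!r} | {term!r}: shift = {shifted - base:+.3f}")
```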