@inproceedings{xie-etal-2024-addressing,
    title = "Addressing Healthcare-related Racial and {LGBTQ}+ Biases in Pretrained Language Models",
    author = "Xie, Sean and
      Hassanpour, Saeed and
      Vosoughi, Soroush",
    editor = "Duh, Kevin and
      Gomez, Helena and
      Bethard, Steven",
    booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
    month = jun,
    year = "2024",
    address = "Mexico City, Mexico",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-naacl.278",
    doi = "10.18653/v1/2024.findings-naacl.278",
    pages = "4451--4464",
    abstract = "Recent studies have highlighted the issue of Pretrained Language Models (PLMs) inadvertently propagating social stigmas and stereotypes, a critical concern given their widespread use. This is particularly problematic in sensitive areas like healthcare, where such biases could lead to detrimental outcomes. Our research addresses this by adapting two intrinsic bias benchmarks to quantify racial and LGBTQ+ biases in prevalent PLMs. We also empirically evaluate the effectiveness of various debiasing methods in mitigating these biases. Furthermore, we assess the impact of debiasing on both Natural Language Understanding and specific biomedical applications. Our findings reveal that while PLMs commonly exhibit healthcare-related racial and LGBTQ+ biases, the applied debiasing techniques successfully reduce these biases without compromising the models{'} performance in downstream tasks.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="xie-etal-2024-addressing">
    <titleInfo>
      <title>Addressing Healthcare-related Racial and LGBTQ+ Biases in Pretrained Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Sean</namePart>
      <namePart type="family">Xie</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Saeed</namePart>
      <namePart type="family">Hassanpour</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Soroush</namePart>
      <namePart type="family">Vosoughi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: NAACL 2024</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Kevin</namePart>
        <namePart type="family">Duh</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Helena</namePart>
        <namePart type="family">Gomez</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Steven</namePart>
        <namePart type="family">Bethard</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Mexico City, Mexico</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Recent studies have highlighted the issue of Pretrained Language Models (PLMs) inadvertently propagating social stigmas and stereotypes, a critical concern given their widespread use. This is particularly problematic in sensitive areas like healthcare, where such biases could lead to detrimental outcomes. Our research addresses this by adapting two intrinsic bias benchmarks to quantify racial and LGBTQ+ biases in prevalent PLMs. We also empirically evaluate the effectiveness of various debiasing methods in mitigating these biases. Furthermore, we assess the impact of debiasing on both Natural Language Understanding and specific biomedical applications. Our findings reveal that while PLMs commonly exhibit healthcare-related racial and LGBTQ+ biases, the applied debiasing techniques successfully reduce these biases without compromising the models’ performance in downstream tasks.</abstract>
    <identifier type="citekey">xie-etal-2024-addressing</identifier>
    <identifier type="doi">10.18653/v1/2024.findings-naacl.278</identifier>
    <location>
      <url>https://aclanthology.org/2024.findings-naacl.278</url>
    </location>
    <part>
      <date>2024-06</date>
      <extent unit="page">
        <start>4451</start>
        <end>4464</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Addressing Healthcare-related Racial and LGBTQ+ Biases in Pretrained Language Models
%A Xie, Sean
%A Hassanpour, Saeed
%A Vosoughi, Soroush
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Findings of the Association for Computational Linguistics: NAACL 2024
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F xie-etal-2024-addressing
%X Recent studies have highlighted the issue of Pretrained Language Models (PLMs) inadvertently propagating social stigmas and stereotypes, a critical concern given their widespread use. This is particularly problematic in sensitive areas like healthcare, where such biases could lead to detrimental outcomes. Our research addresses this by adapting two intrinsic bias benchmarks to quantify racial and LGBTQ+ biases in prevalent PLMs. We also empirically evaluate the effectiveness of various debiasing methods in mitigating these biases. Furthermore, we assess the impact of debiasing on both Natural Language Understanding and specific biomedical applications. Our findings reveal that while PLMs commonly exhibit healthcare-related racial and LGBTQ+ biases, the applied debiasing techniques successfully reduce these biases without compromising the models’ performance in downstream tasks.
%R 10.18653/v1/2024.findings-naacl.278
%U https://aclanthology.org/2024.findings-naacl.278
%U https://doi.org/10.18653/v1/2024.findings-naacl.278
%P 4451-4464
Markdown (Informal)
[Addressing Healthcare-related Racial and LGBTQ+ Biases in Pretrained Language Models](https://aclanthology.org/2024.findings-naacl.278) (Xie et al., Findings 2024)
ACL
Sean Xie, Saeed Hassanpour, and Soroush Vosoughi. 2024. [Addressing Healthcare-related Racial and LGBTQ+ Biases in Pretrained Language Models](https://aclanthology.org/2024.findings-naacl.278). In *Findings of the Association for Computational Linguistics: NAACL 2024*, pages 4451–4464, Mexico City, Mexico. Association for Computational Linguistics.