@inproceedings{chakour-sadat-2025-human,
title = "Human-Centered Disability Bias Detection in Large Language Models",
author = "Chakour, Habiba and
Sadat, Fatiha",
editor = "Zhao, Wei and
D{'}Souza, Jennifer and
Eger, Steffen and
Lauscher, Anne and
Hou, Yufang and
Sadat Moosavi, Nafise and
Miller, Tristan and
Lin, Chenghua",
booktitle = "Proceedings of The First Workshop on Human{--}LLM Collaboration for Ethical and Responsible Science Production (SciProdLLM)",
month = dec,
year = "2025",
address = "Mumbai, India (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.sciprodllm-1.2/",
pages = "6--18",
ISBN = "979-8-89176-307-4",
abstract = "To promote a more just and inclusive society, developers and researchers are strongly encouraged to design Language Models (LM) with ethical considerations at the forefront, ensuring that the benefits and opportunities of AI are accessible to all users and communities. Incorporating humans in the loop is one approach recognized for mitigating general AI biases. Consequently, the development of new design guidelines and datasets is essential to help AI systems realize their full potential for the benefit of people with disabilities.This study aims to identify disability-related bias in Large Masked Language Models (MLMs), the Electra. A participatory and collaborative research approach was employed, involving three disability organizations to collect information on deaf and hard-of-hearing individuals. Our initial analysis reveals that the studied MLM is highly sensitive to the various identity references used to describe deaf and hard-of-hearing people."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chakour-sadat-2025-human">
<titleInfo>
<title>Human-Centered Disability Bias Detection in Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Habiba</namePart>
<namePart type="family">Chakour</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fatiha</namePart>
<namePart type="family">Sadat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of The First Workshop on Human–LLM Collaboration for Ethical and Responsible Science Production (SciProdLLM)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wei</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jennifer</namePart>
<namePart type="family">D’Souza</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steffen</namePart>
<namePart type="family">Eger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anne</namePart>
<namePart type="family">Lauscher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yufang</namePart>
<namePart type="family">Hou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nafise</namePart>
<namePart type="family">Sadat Moosavi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tristan</namePart>
<namePart type="family">Miller</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chenghua</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mumbai, India (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-307-4</identifier>
</relatedItem>
<abstract>To promote a more just and inclusive society, developers and researchers are strongly encouraged to design Language Models (LMs) with ethical considerations at the forefront, ensuring that the benefits and opportunities of AI are accessible to all users and communities. Incorporating humans in the loop is one approach recognized for mitigating general AI biases. Consequently, the development of new design guidelines and datasets is essential to help AI systems realize their full potential for the benefit of people with disabilities. This study aims to identify disability-related bias in large Masked Language Models (MLMs), specifically Electra. A participatory and collaborative research approach was employed, involving three disability organizations to collect information on deaf and hard-of-hearing individuals. Our initial analysis reveals that the studied MLM is highly sensitive to the various identity references used to describe deaf and hard-of-hearing people.</abstract>
<identifier type="citekey">chakour-sadat-2025-human</identifier>
<location>
<url>https://aclanthology.org/2025.sciprodllm-1.2/</url>
</location>
<part>
<date>2025-12</date>
<extent unit="page">
<start>6</start>
<end>18</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Human-Centered Disability Bias Detection in Large Language Models
%A Chakour, Habiba
%A Sadat, Fatiha
%Y Zhao, Wei
%Y D’Souza, Jennifer
%Y Eger, Steffen
%Y Lauscher, Anne
%Y Hou, Yufang
%Y Sadat Moosavi, Nafise
%Y Miller, Tristan
%Y Lin, Chenghua
%S Proceedings of The First Workshop on Human–LLM Collaboration for Ethical and Responsible Science Production (SciProdLLM)
%D 2025
%8 December
%I Association for Computational Linguistics
%C Mumbai, India (Hybrid)
%@ 979-8-89176-307-4
%F chakour-sadat-2025-human
%X To promote a more just and inclusive society, developers and researchers are strongly encouraged to design Language Models (LMs) with ethical considerations at the forefront, ensuring that the benefits and opportunities of AI are accessible to all users and communities. Incorporating humans in the loop is one approach recognized for mitigating general AI biases. Consequently, the development of new design guidelines and datasets is essential to help AI systems realize their full potential for the benefit of people with disabilities. This study aims to identify disability-related bias in large Masked Language Models (MLMs), specifically Electra. A participatory and collaborative research approach was employed, involving three disability organizations to collect information on deaf and hard-of-hearing individuals. Our initial analysis reveals that the studied MLM is highly sensitive to the various identity references used to describe deaf and hard-of-hearing people.
%U https://aclanthology.org/2025.sciprodllm-1.2/
%P 6-18
Markdown (Informal)
[Human-Centered Disability Bias Detection in Large Language Models](https://aclanthology.org/2025.sciprodllm-1.2/) (Chakour & Sadat, SciProdLLM 2025)
ACL
Habiba Chakour and Fatiha Sadat. 2025. Human-Centered Disability Bias Detection in Large Language Models. In Proceedings of The First Workshop on Human–LLM Collaboration for Ethical and Responsible Science Production (SciProdLLM), pages 6–18, Mumbai, India (Hybrid). Association for Computational Linguistics.
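
The abstract describes probing a masked language model's sensitivity to the identity references used for deaf and hard-of-hearing people. As a minimal, hypothetical sketch of that style of probe (not the paper's actual protocol), the snippet below compares an Electra generator's fill-mask predictions across a few identity terms. The checkpoint name, template, and term list are illustrative assumptions; the generator checkpoint is used because, unlike the Electra discriminator, it exposes a masked-LM head.

# Hypothetical sketch of an identity-term sensitivity probe for a masked LM.
# The template and identity terms below are illustrative, not the paper's.
from transformers import pipeline

# Assumed stand-in checkpoint: the Electra generator (not the discriminator)
# supports BERT-style [MASK] filling.
fill = pipeline("fill-mask", model="google/electra-small-generator")

TEMPLATE = "The {} person is [MASK]."
IDENTITY_TERMS = ["deaf", "hard-of-hearing", "hearing"]  # illustrative terms

for term in IDENTITY_TERMS:
    # Top-5 mask fillings; large shifts across terms suggest the model is
    # sensitive to the identity reference in the template.
    preds = fill(TEMPLATE.format(term), top_k=5)
    print(f"{term:>16}: {[p['token_str'] for p in preds]}")

Comparing the returned token lists (or their scores) across terms gives only a crude, qualitative view of the sensitivity the abstract reports; a study like the one cited would rely on community-vetted templates and a principled bias metric.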