BibTeX
@inproceedings{devinney-etal-2024-dont,
title = "We Don{'}t Talk About That: Case Studies on Intersectional Analysis of Social Bias in Large Language Models",
author = {Devinney, Hannah and
Bj{\"o}rklund, Jenny and
Bj{\"o}rklund, Henrik},
editor = "Fale{\'n}ska, Agnieszka and
Basta, Christine and
Costa-juss{\`a}, Marta and
Goldfarb-Tarrant, Seraphina and
Nozza, Debora",
booktitle = "Proceedings of the 5th Workshop on Gender Bias in Natural Language Processing (GeBNLP)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.gebnlp-1.3",
doi = "10.18653/v1/2024.gebnlp-1.3",
pages = "33--44",
abstract = "Despite concerns that Large Language Models (LLMs) are vectors for reproducing and amplifying social biases such as sexism, transphobia, islamophobia, and racism, there is a lack of work qualitatively analyzing \textit{how} such patterns of bias are generated by LLMs. We use mixed-methods approaches and apply a feminist, intersectional lens to the problem across two language domains, Swedish and English, by generating narrative texts using LLMs. We find that hegemonic norms are consistently reproduced; dominant identities are often treated as {`}default{'}; and discussion of identity itself may be considered {`}inappropriate{'} by the safety features applied to some LLMs. Due to the differing behaviors of models, depending both on their design and the language they are trained on, we observe that strategies of identifying {``}bias{''} must be adapted to individual models and their socio-cultural contexts.{\_}Content warning: This research concerns the identification of harms, including stereotyping, denigration, and erasure of minoritized groups. Examples, including transphobic and racist content, are included and discussed.{\_}",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="devinney-etal-2024-dont">
    <titleInfo>
      <title>We Don’t Talk About That: Case Studies on Intersectional Analysis of Social Bias in Large Language Models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Hannah</namePart>
      <namePart type="family">Devinney</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jenny</namePart>
      <namePart type="family">Björklund</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Henrik</namePart>
      <namePart type="family">Björklund</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 5th Workshop on Gender Bias in Natural Language Processing (GeBNLP)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Agnieszka</namePart>
        <namePart type="family">Faleńska</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Christine</namePart>
        <namePart type="family">Basta</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Marta</namePart>
        <namePart type="family">Costa-jussà</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Seraphina</namePart>
        <namePart type="family">Goldfarb-Tarrant</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Debora</namePart>
        <namePart type="family">Nozza</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Bangkok, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Despite concerns that Large Language Models (LLMs) are vectors for reproducing and amplifying social biases such as sexism, transphobia, islamophobia, and racism, there is a lack of work qualitatively analyzing how such patterns of bias are generated by LLMs. We use mixed-methods approaches and apply a feminist, intersectional lens to the problem across two language domains, Swedish and English, by generating narrative texts using LLMs. We find that hegemonic norms are consistently reproduced; dominant identities are often treated as ‘default’; and discussion of identity itself may be considered ‘inappropriate’ by the safety features applied to some LLMs. Due to the differing behaviors of models, depending both on their design and the language they are trained on, we observe that strategies of identifying “bias” must be adapted to individual models and their socio-cultural contexts. Content warning: This research concerns the identification of harms, including stereotyping, denigration, and erasure of minoritized groups. Examples, including transphobic and racist content, are included and discussed.</abstract>
    <identifier type="citekey">devinney-etal-2024-dont</identifier>
    <identifier type="doi">10.18653/v1/2024.gebnlp-1.3</identifier>
    <location>
      <url>https://aclanthology.org/2024.gebnlp-1.3</url>
    </location>
    <part>
      <date>2024-08</date>
      <extent unit="page">
        <start>33</start>
        <end>44</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T We Don’t Talk About That: Case Studies on Intersectional Analysis of Social Bias in Large Language Models
%A Devinney, Hannah
%A Björklund, Jenny
%A Björklund, Henrik
%Y Faleńska, Agnieszka
%Y Basta, Christine
%Y Costa-jussà, Marta
%Y Goldfarb-Tarrant, Seraphina
%Y Nozza, Debora
%S Proceedings of the 5th Workshop on Gender Bias in Natural Language Processing (GeBNLP)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F devinney-etal-2024-dont
%X Despite concerns that Large Language Models (LLMs) are vectors for reproducing and amplifying social biases such as sexism, transphobia, islamophobia, and racism, there is a lack of work qualitatively analyzing how such patterns of bias are generated by LLMs. We use mixed-methods approaches and apply a feminist, intersectional lens to the problem across two language domains, Swedish and English, by generating narrative texts using LLMs. We find that hegemonic norms are consistently reproduced; dominant identities are often treated as ‘default’; and discussion of identity itself may be considered ‘inappropriate’ by the safety features applied to some LLMs. Due to the differing behaviors of models, depending both on their design and the language they are trained on, we observe that strategies of identifying “bias” must be adapted to individual models and their socio-cultural contexts. Content warning: This research concerns the identification of harms, including stereotyping, denigration, and erasure of minoritized groups. Examples, including transphobic and racist content, are included and discussed.
%R 10.18653/v1/2024.gebnlp-1.3
%U https://aclanthology.org/2024.gebnlp-1.3
%U https://doi.org/10.18653/v1/2024.gebnlp-1.3
%P 33-44
Markdown (Informal)
[We Don’t Talk About That: Case Studies on Intersectional Analysis of Social Bias in Large Language Models](https://aclanthology.org/2024.gebnlp-1.3) (Devinney et al., GeBNLP-WS 2024)
ACL
Hannah Devinney, Jenny Björklund, and Henrik Björklund. 2024. [We Don’t Talk About That: Case Studies on Intersectional Analysis of Social Bias in Large Language Models](https://aclanthology.org/2024.gebnlp-1.3). In *Proceedings of the 5th Workshop on Gender Bias in Natural Language Processing (GeBNLP)*, pages 33–44, Bangkok, Thailand. Association for Computational Linguistics.