@inproceedings{mihaylov-shtedritski-2024-elegant,
  title     = {What an Elegant Bridge: Multilingual {LLM}s are Biased Similarly in Different Languages},
  author    = {Mihaylov, Viktor and Shtedritski, Aleksandar},
  editor    = {Peled-Cohen, Lotem and Calderon, Nitay and Lissak, Shir and Reichart, Roi},
  booktitle = {Proceedings of the 1st Workshop on NLP for Science (NLP4Science)},
  month     = nov,
  year      = {2024},
  address   = {Miami, FL, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.nlp4science-1.3},
  pages     = {16--23},
  abstract  = {This paper investigates biases of Large Language Models (LLMs) through the lens of grammatical gender. Drawing inspiration from seminal works in psycholinguistics, particularly the study of gender{'}s influence on language perception, we leverage multilingual LLMs to revisit and expand upon the foundational experiments of Boroditsky (2003). Employing LLMs as a novel method for examining psycholinguistic biases related to grammatical gender, we prompt a model to describe nouns with adjectives in various languages, focusing specifically on languages with grammatical gender. In particular, we look at adjective co-occurrences across gender and languages, and train a binary classifier to predict grammatical gender given adjectives an LLM uses to describe a noun. Surprisingly, we find that a simple classifier can not only predict noun gender above chance but also exhibit cross-language transferability. We show that while LLMs may describe words differently in different languages, they are biased similarly.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="mihaylov-shtedritski-2024-elegant">
<titleInfo>
<title>What an Elegant Bridge: Multilingual LLMs are Biased Similarly in Different Languages</title>
</titleInfo>
<name type="personal">
<namePart type="given">Viktor</namePart>
<namePart type="family">Mihaylov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aleksandar</namePart>
<namePart type="family">Shtedritski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on NLP for Science (NLP4Science)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lotem</namePart>
<namePart type="family">Peled-Cohen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nitay</namePart>
<namePart type="family">Calderon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shir</namePart>
<namePart type="family">Lissak</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roi</namePart>
<namePart type="family">Reichart</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, FL, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper investigates biases of Large Language Models (LLMs) through the lens of grammatical gender. Drawing inspiration from seminal works in psycholinguistics, particularly the study of gender’s influence on language perception, we leverage multilingual LLMs to revisit and expand upon the foundational experiments of Boroditsky (2003). Employing LLMs as a novel method for examining psycholinguistic biases related to grammatical gender, we prompt a model to describe nouns with adjectives in various languages, focusing specifically on languages with grammatical gender. In particular, we look at adjective co-occurrences across gender and languages, and train a binary classifier to predict grammatical gender given adjectives an LLM uses to describe a noun. Surprisingly, we find that a simple classifier can not only predict noun gender above chance but also exhibit cross-language transferability. We show that while LLMs may describe words differently in different languages, they are biased similarly.</abstract>
<identifier type="citekey">mihaylov-shtedritski-2024-elegant</identifier>
<location>
<url>https://aclanthology.org/2024.nlp4science-1.3</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>16</start>
<end>23</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T What an Elegant Bridge: Multilingual LLMs are Biased Similarly in Different Languages
%A Mihaylov, Viktor
%A Shtedritski, Aleksandar
%Y Peled-Cohen, Lotem
%Y Calderon, Nitay
%Y Lissak, Shir
%Y Reichart, Roi
%S Proceedings of the 1st Workshop on NLP for Science (NLP4Science)
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, FL, USA
%F mihaylov-shtedritski-2024-elegant
%X This paper investigates biases of Large Language Models (LLMs) through the lens of grammatical gender. Drawing inspiration from seminal works in psycholinguistics, particularly the study of gender’s influence on language perception, we leverage multilingual LLMs to revisit and expand upon the foundational experiments of Boroditsky (2003). Employing LLMs as a novel method for examining psycholinguistic biases related to grammatical gender, we prompt a model to describe nouns with adjectives in various languages, focusing specifically on languages with grammatical gender. In particular, we look at adjective co-occurrences across gender and languages, and train a binary classifier to predict grammatical gender given adjectives an LLM uses to describe a noun. Surprisingly, we find that a simple classifier can not only predict noun gender above chance but also exhibit cross-language transferability. We show that while LLMs may describe words differently in different languages, they are biased similarly.
%U https://aclanthology.org/2024.nlp4science-1.3
%P 16-23
Markdown (Informal)
[What an Elegant Bridge: Multilingual LLMs are Biased Similarly in Different Languages](https://aclanthology.org/2024.nlp4science-1.3) (Mihaylov & Shtedritski, NLP4Science 2024)
ACL