@inproceedings{schuster-etal-2025-profiling,
    title     = {Profiling Bias in {LLMs}: {Stereotype} Dimensions in Contextual Word Embeddings},
    author    = {Schuster, Carolin M. and
                 Roman, Maria-Alexandra and
                 Ghatiwala, Shashwat and
                 Groh, Georg},
    editor    = {Johansson, Richard and
                 Stymne, Sara},
    booktitle = {Proceedings of the Joint 25th Nordic Conference on Computational Linguistics and 11th Baltic Conference on Human Language Technologies ({NoDaLiDa}/{Baltic-HLT} 2025)},
    month     = mar,
    year      = {2025},
    address   = {Tallinn, Estonia},
    publisher = {University of Tartu Library},
    url       = {https://aclanthology.org/2025.nodalida-1.65/},
    pages     = {639--650},
    isbn      = {978-9908-53-109-0},
    abstract  = {Large language models (LLMs) are the foundation of the current successes of artificial intelligence (AI), however, they are unavoidably biased. To effectively communicate the risks and encourage mitigation efforts these models need adequate and intuitive descriptions of their discriminatory properties, appropriate for all audiences of AI. We suggest bias profiles with respect to stereotype dimensions based on dictionaries from social psychology research. Along these dimensions we investigate gender bias in contextual embeddings, across contexts and layers, and generate stereotype profiles for twelve different LLMs, demonstrating their intuition and use case for exposing and visualizing bias.},
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="schuster-etal-2025-profiling">
<titleInfo>
<title>Profiling Bias in LLMs: Stereotype Dimensions in Contextual Word Embeddings</title>
</titleInfo>
<name type="personal">
<namePart type="given">Carolin</namePart>
<namePart type="given">M</namePart>
<namePart type="family">Schuster</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maria-Alexandra</namePart>
<namePart type="family">Roman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shashwat</namePart>
<namePart type="family">Ghatiwala</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Georg</namePart>
<namePart type="family">Groh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-03</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Joint 25th Nordic Conference on Computational Linguistics and 11th Baltic Conference on Human Language Technologies (NoDaLiDa/Baltic-HLT 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Richard</namePart>
<namePart type="family">Johansson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sara</namePart>
<namePart type="family">Stymne</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>University of Tartu Library</publisher>
<place>
<placeTerm type="text">Tallinn, Estonia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">978-9908-53-109-0</identifier>
</relatedItem>
<abstract>Large language models (LLMs) are the foundation of the current successes of artificial intelligence (AI), however, they are unavoidably biased. To effectively communicate the risks and encourage mitigation efforts these models need adequate and intuitive descriptions of their discriminatory properties, appropriate for all audiences of AI. We suggest bias profiles with respect to stereotype dimensions based on dictionaries from social psychology research. Along these dimensions we investigate gender bias in contextual embeddings, across contexts and layers, and generate stereotype profiles for twelve different LLMs, demonstrating their intuition and use case for exposing and visualizing bias.</abstract>
<identifier type="citekey">schuster-etal-2025-profiling</identifier>
<location>
<url>https://aclanthology.org/2025.nodalida-1.65/</url>
</location>
<part>
<date>2025-03</date>
<extent unit="page">
<start>639</start>
<end>650</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Profiling Bias in LLMs: Stereotype Dimensions in Contextual Word Embeddings
%A Schuster, Carolin M.
%A Roman, Maria-Alexandra
%A Ghatiwala, Shashwat
%A Groh, Georg
%Y Johansson, Richard
%Y Stymne, Sara
%S Proceedings of the Joint 25th Nordic Conference on Computational Linguistics and 11th Baltic Conference on Human Language Technologies (NoDaLiDa/Baltic-HLT 2025)
%D 2025
%8 March
%I University of Tartu Library
%C Tallinn, Estonia
%@ 978-9908-53-109-0
%F schuster-etal-2025-profiling
%X Large language models (LLMs) are the foundation of the current successes of artificial intelligence (AI), however, they are unavoidably biased. To effectively communicate the risks and encourage mitigation efforts these models need adequate and intuitive descriptions of their discriminatory properties, appropriate for all audiences of AI. We suggest bias profiles with respect to stereotype dimensions based on dictionaries from social psychology research. Along these dimensions we investigate gender bias in contextual embeddings, across contexts and layers, and generate stereotype profiles for twelve different LLMs, demonstrating their intuition and use case for exposing and visualizing bias.
%U https://aclanthology.org/2025.nodalida-1.65/
%P 639-650
Markdown (Informal)
[Profiling Bias in LLMs: Stereotype Dimensions in Contextual Word Embeddings](https://aclanthology.org/2025.nodalida-1.65/) (Schuster et al., NoDaLiDa 2025)
ACL
- Carolin M. Schuster, Maria-Alexandra Roman, Shashwat Ghatiwala, and Georg Groh. 2025. Profiling Bias in LLMs: Stereotype Dimensions in Contextual Word Embeddings. In Proceedings of the Joint 25th Nordic Conference on Computational Linguistics and 11th Baltic Conference on Human Language Technologies (NoDaLiDa/Baltic-HLT 2025), pages 639–650, Tallinn, Estonia. University of Tartu Library.