BibTeX
@inproceedings{kantharuban-etal-2025-stereotype,
title = "Stereotype or Personalization? User Identity Biases Chatbot Recommendations",
author = "Kantharuban, Anjali and
Milbauer, Jeremiah and
Sap, Maarten and
Strubell, Emma and
Neubig, Graham",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.1254/",
doi = "10.18653/v1/2025.findings-acl.1254",
pages = "24418--24436",
ISBN = "979-8-89176-256-5",
abstract = "While personalized recommendations are often desired by users, it can be difficult in practice to distinguish cases of bias from cases of personalization: we find that models generate racially stereotypical recommendations regardless of whether the user revealed their identity intentionally through explicit indications or unintentionally through implicit cues. We demonstrate that when people use large language models (LLMs) to generate recommendations, the LLMs produce responses that reflect both what the user wants and who the user is. We argue that chatbots ought to transparently indicate when recommendations are influenced by a user{'}s revealed identity characteristics, but observe that they currently fail to do so. Our experiments show that even though a user{'}s revealed identity significantly influences model recommendations ($p < 0.001$), model responses obfuscate this fact in response to user queries. This bias and lack of transparency occurs consistently across multiple popular consumer LLMs and for four American racial groups."
}

MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kantharuban-etal-2025-stereotype">
    <titleInfo>
      <title>Stereotype or Personalization? User Identity Biases Chatbot Recommendations</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Anjali</namePart>
      <namePart type="family">Kantharuban</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jeremiah</namePart>
      <namePart type="family">Milbauer</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Maarten</namePart>
      <namePart type="family">Sap</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Emma</namePart>
      <namePart type="family">Strubell</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Graham</namePart>
      <namePart type="family">Neubig</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: ACL 2025</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Wanxiang</namePart>
        <namePart type="family">Che</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Joyce</namePart>
        <namePart type="family">Nabende</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ekaterina</namePart>
        <namePart type="family">Shutova</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mohammad</namePart>
        <namePart type="given">Taher</namePart>
        <namePart type="family">Pilehvar</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Vienna, Austria</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-256-5</identifier>
    </relatedItem>
    <abstract>While personalized recommendations are often desired by users, it can be difficult in practice to distinguish cases of bias from cases of personalization: we find that models generate racially stereotypical recommendations regardless of whether the user revealed their identity intentionally through explicit indications or unintentionally through implicit cues. We demonstrate that when people use large language models (LLMs) to generate recommendations, the LLMs produce responses that reflect both what the user wants and who the user is. We argue that chatbots ought to transparently indicate when recommendations are influenced by a user’s revealed identity characteristics, but observe that they currently fail to do so. Our experiments show that even though a user’s revealed identity significantly influences model recommendations (p < 0.001), model responses obfuscate this fact in response to user queries. This bias and lack of transparency occurs consistently across multiple popular consumer LLMs and for four American racial groups.</abstract>
    <identifier type="citekey">kantharuban-etal-2025-stereotype</identifier>
    <identifier type="doi">10.18653/v1/2025.findings-acl.1254</identifier>
    <location>
      <url>https://aclanthology.org/2025.findings-acl.1254/</url>
    </location>
    <part>
      <date>2025-07</date>
      <extent unit="page">
        <start>24418</start>
        <end>24436</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Stereotype or Personalization? User Identity Biases Chatbot Recommendations
%A Kantharuban, Anjali
%A Milbauer, Jeremiah
%A Sap, Maarten
%A Strubell, Emma
%A Neubig, Graham
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F kantharuban-etal-2025-stereotype
%X While personalized recommendations are often desired by users, it can be difficult in practice to distinguish cases of bias from cases of personalization: we find that models generate racially stereotypical recommendations regardless of whether the user revealed their identity intentionally through explicit indications or unintentionally through implicit cues. We demonstrate that when people use large language models (LLMs) to generate recommendations, the LLMs produce responses that reflect both what the user wants and who the user is. We argue that chatbots ought to transparently indicate when recommendations are influenced by a user’s revealed identity characteristics, but observe that they currently fail to do so. Our experiments show that even though a user’s revealed identity significantly influences model recommendations (p < 0.001), model responses obfuscate this fact in response to user queries. This bias and lack of transparency occurs consistently across multiple popular consumer LLMs and for four American racial groups.
%R 10.18653/v1/2025.findings-acl.1254
%U https://aclanthology.org/2025.findings-acl.1254/
%U https://doi.org/10.18653/v1/2025.findings-acl.1254
%P 24418-24436

Markdown (Informal)
[Stereotype or Personalization? User Identity Biases Chatbot Recommendations](https://aclanthology.org/2025.findings-acl.1254/) (Kantharuban et al., Findings 2025)