@inproceedings{mukherjee-etal-2024-cultural,
title = "Cultural Conditioning or Placebo? On the Effectiveness of Socio-Demographic Prompting",
author = "Mukherjee, Sagnik and
Adilazuarda, Muhammad Farid and
Sitaram, Sunayana and
Bali, Kalika and
Aji, Alham Fikri and
Choudhury, Monojit",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.emnlp-main.884",
pages = "15811--15837",
abstract = "Socio-demographic prompting is a commonly employed approach both to study cultural biases in LLMs and to align models to certain cultures. In this paper, we systematically probe four LLMs (Llama 3, Mistral v0.2, GPT-3.5 Turbo and GPT-4) with prompts that are conditioned on culturally sensitive and non-sensitive cues, on datasets that are supposed to be culturally sensitive (EtiCor and CALI) or neutral (MMLU and ETHICS). We observe that all models except GPT-4 show significant variations in their responses on both kinds of datasets for both kinds of prompts, casting doubt on the robustness of culturally conditioned prompting as a method for eliciting cultural bias in models that are not sufficiently stable with respect to arbitrary prompting cues. Further, we show that some of the supposedly culturally neutral datasets contain a non-trivial fraction of culturally sensitive questions/tasks.",
}
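
For readers who want the probing setup in miniature, below is a minimal sketch of socio-demographic prompting as the abstract describes it: the same question is asked with no cue, with a culturally sensitive persona cue, and with a non-sensitive (placebo) cue, and the variation in responses across cues is measured. The persona strings, the example question, and the query_fn/toy_model stubs are illustrative assumptions, not the paper's actual prompts, datasets, or models.

from typing import Callable, Iterable

def conditioned_prompt(persona: str, question: str) -> str:
    """Prefix a socio-demographic cue to an otherwise fixed question."""
    return f"{persona}\n\n{question}" if persona else question

def response_variation(
    query_fn: Callable[[str], str],   # wraps one LLM call (stubbed below)
    personas: Iterable[str],          # culturally sensitive or placebo cues
    question: str,
) -> float:
    """Fraction of persona-conditioned answers that differ from the
    unconditioned baseline -- a crude proxy for prompt sensitivity."""
    baseline = query_fn(conditioned_prompt("", question))
    answers = [query_fn(conditioned_prompt(p, question)) for p in personas]
    return sum(a != baseline for a in answers) / len(answers)

if __name__ == "__main__":
    # Dummy model that just maps prompt-length parity to an answer, so the
    # sketch runs end to end without an API key; swap in a real client.
    toy_model = lambda prompt: "A" if len(prompt) % 2 == 0 else "B"
    personas = [
        "You are a person from Japan.",      # culturally sensitive cue
        "You are a person who likes tea.",   # non-sensitive (placebo) cue
    ]
    q = "Is it acceptable to tip at a restaurant? Answer A (yes) or B (no)."
    print(response_variation(toy_model, personas, q))

A robust model in the paper's sense would score near zero for placebo cues; the paper's finding is that most tested models vary under both kinds of cue.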