@inproceedings{davenia-basile-2025-quantifying,
title = "Quantifying the Influence of Irrelevant Contexts on Political Opinions Produced by {LLM}s",
author = "D{'}Avenia, Samuele and
Basile, Valerio",
editor = "Zhao, Jin and
Wang, Mingyang and
Liu, Zhu",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.acl-srw.28/",
doi = "10.18653/v1/2025.acl-srw.28",
pages = "434--454",
ISBN = "979-8-89176-254-1",
abstract = "Several recent works have examined the generations produced by large language models (LLMs) on subjective topics such as political opinions and attitudinal questionnaires. There is growing interest in controlling these outputs to align with specific users or perspectives using model steering techniques. However, several studies have highlighted unintended and unexpected steering effects, where minor changes in the prompt or irrelevant contextual cues influence model-generated opinions.This work empirically tests how irrelevant information can systematically bias model opinions in specific directions. Using the Political Compass Test questionnaire, we conduct a detailed statistical analysis to quantify these shifts using the opinions generated by LLMs in an open-generation setting. The results demonstrate that even seemingly unrelated contexts consistently alter model responses in predictable ways, further highlighting challenges in ensuring the robustness and reliability of LLMs when generating opinions on subjective topics."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="davenia-basile-2025-quantifying">
    <titleInfo>
      <title>Quantifying the Influence of Irrelevant Contexts on Political Opinions Produced by LLMs</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Samuele</namePart>
      <namePart type="family">D’Avenia</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Valerio</namePart>
      <namePart type="family">Basile</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Jin</namePart>
        <namePart type="family">Zhao</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Mingyang</namePart>
        <namePart type="family">Wang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Zhu</namePart>
        <namePart type="family">Liu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Vienna, Austria</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-254-1</identifier>
    </relatedItem>
    <abstract>Several recent works have examined the generations produced by large language models (LLMs) on subjective topics such as political opinions and attitudinal questionnaires. There is growing interest in controlling these outputs to align with specific users or perspectives using model steering techniques. However, several studies have highlighted unintended and unexpected steering effects, where minor changes in the prompt or irrelevant contextual cues influence model-generated opinions. This work empirically tests how irrelevant information can systematically bias model opinions in specific directions. Using the Political Compass Test questionnaire, we conduct a detailed statistical analysis to quantify these shifts using the opinions generated by LLMs in an open-generation setting. The results demonstrate that even seemingly unrelated contexts consistently alter model responses in predictable ways, further highlighting challenges in ensuring the robustness and reliability of LLMs when generating opinions on subjective topics.</abstract>
<identifier type="citekey">davenia-basile-2025-quantifying</identifier>
<identifier type="doi">10.18653/v1/2025.acl-srw.28</identifier>
<location>
<url>https://aclanthology.org/2025.acl-srw.28/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>434</start>
<end>454</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Quantifying the Influence of Irrelevant Contexts on Political Opinions Produced by LLMs
%A D’Avenia, Samuele
%A Basile, Valerio
%Y Zhao, Jin
%Y Wang, Mingyang
%Y Liu, Zhu
%S Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-254-1
%F davenia-basile-2025-quantifying
%X Several recent works have examined the generations produced by large language models (LLMs) on subjective topics such as political opinions and attitudinal questionnaires. There is growing interest in controlling these outputs to align with specific users or perspectives using model steering techniques. However, several studies have highlighted unintended and unexpected steering effects, where minor changes in the prompt or irrelevant contextual cues influence model-generated opinions. This work empirically tests how irrelevant information can systematically bias model opinions in specific directions. Using the Political Compass Test questionnaire, we conduct a detailed statistical analysis to quantify these shifts using the opinions generated by LLMs in an open-generation setting. The results demonstrate that even seemingly unrelated contexts consistently alter model responses in predictable ways, further highlighting challenges in ensuring the robustness and reliability of LLMs when generating opinions on subjective topics.
%R 10.18653/v1/2025.acl-srw.28
%U https://aclanthology.org/2025.acl-srw.28/
%U https://doi.org/10.18653/v1/2025.acl-srw.28
%P 434-454
[Quantifying the Influence of Irrelevant Contexts on Political Opinions Produced by LLMs](https://aclanthology.org/2025.acl-srw.28/) (D’Avenia & Basile, ACL 2025)
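
For readers curious about the setup the abstract describes, here is a minimal sketch, not the authors' code: prepend an irrelevant context to a questionnaire statement, sample open-ended opinions from an LLM, and compare the stance distribution against a no-context baseline. The example statement (paraphrasing a Political Compass Test item), the example context, and the `query_llm` helper are all hypothetical placeholders.

```python
# Minimal illustrative sketch of the probe described in the abstract;
# the statement, context, and query_llm() are hypothetical placeholders.
from collections import Counter

PCT_STATEMENT = "If economic globalisation is inevitable, it should primarily serve humanity."
IRRELEVANT_CONTEXT = "The recipe calls for two cups of flour and a pinch of salt."

def build_prompt(statement: str, context: str | None) -> str:
    """Optionally prefix the questionnaire item with an unrelated context."""
    prefix = f"{context}\n\n" if context else ""
    return (f"{prefix}Do you agree or disagree with the following statement? "
            f"Answer freely in one or two sentences.\n\nStatement: {statement}")

def query_llm(prompt: str) -> str:
    """Placeholder for a real model call (plug in any chat-completion client)."""
    raise NotImplementedError("supply an LLM client here")

def label_stance(opinion: str) -> str:
    """Crude keyword mapping of an open-ended answer to a stance, for illustration only."""
    text = opinion.lower()
    if "disagree" in text:   # check "disagree" first, since it contains "agree"
        return "disagree"
    if "agree" in text:
        return "agree"
    return "other"

def stance_counts(statement: str, context: str | None, n_samples: int = 20) -> Counter:
    """Sample n_samples opinions under one context condition and tally stances."""
    return Counter(label_stance(query_llm(build_prompt(statement, context)))
                   for _ in range(n_samples))

if __name__ == "__main__":
    # A systematic difference between these two tallies is the kind of shift
    # the paper quantifies statistically, across many contexts and all items.
    baseline = stance_counts(PCT_STATEMENT, context=None)
    shifted = stance_counts(PCT_STATEMENT, context=IRRELEVANT_CONTEXT)
    print("baseline:", baseline)
    print("with irrelevant context:", shifted)
```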