@inproceedings{sun-etal-2025-causalabstain,
    title = "{C}ausal{A}bstain: Enhancing Multilingual {LLM}s with Causal Reasoning for Trustworthy Abstention",
    author = "Sun, Yuxi and
      Zuo, Aoqi and
      Gao, Wei and
      Ma, Jing",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.723/",
    doi = "10.18653/v1/2025.findings-acl.723",
    pages = "14060--14076",
    ISBN = "979-8-89176-256-5",
abstract = "Large Language Models (LLMs) often exhibit knowledge disparities across languages. Encouraging LLMs to $\textit{abstain}$ when faced with knowledge gaps is a promising strategy to reduce hallucinations in multilingual settings. Current abstention strategies for multilingual scenarios primarily rely on generating feedback in various languages using LLMs and performing self-reflection. However, these methods can be adversely impacted by inaccuracies and biases in the generated feedback. To address this, from a causal perspective, we introduce $\textit{CausalAbstain}$, a method that helps LLMs determine whether to utilize multiple generated feedback responses and how to identify the most useful ones. Extensive experiments demonstrate that $\textit{CausalAbstain}$ effectively selects helpful feedback and enhances abstention decisions with interpretability in both native language ($\textit{Casual-native}$) and multilingual ($\textit{Causal-multi}$) settings, outperforming strong baselines on two benchmark datasets covering encyclopedic and commonsense knowledge QA tasks."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="sun-etal-2025-causalabstain">
  <titleInfo>
    <title>CausalAbstain: Enhancing Multilingual LLMs with Causal Reasoning for Trustworthy Abstention</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Yuxi</namePart>
    <namePart type="family">Sun</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Aoqi</namePart>
    <namePart type="family">Zuo</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Wei</namePart>
    <namePart type="family">Gao</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Jing</namePart>
    <namePart type="family">Ma</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2025-07</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <relatedItem type="host">
    <titleInfo>
      <title>Findings of the Association for Computational Linguistics: ACL 2025</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Wanxiang</namePart>
      <namePart type="family">Che</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Joyce</namePart>
      <namePart type="family">Nabende</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ekaterina</namePart>
      <namePart type="family">Shutova</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Mohammad</namePart>
      <namePart type="given">Taher</namePart>
      <namePart type="family">Pilehvar</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <originInfo>
      <publisher>Association for Computational Linguistics</publisher>
      <place>
        <placeTerm type="text">Vienna, Austria</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">conference publication</genre>
    <identifier type="isbn">979-8-89176-256-5</identifier>
  </relatedItem>
  <abstract>Large Language Models (LLMs) often exhibit knowledge disparities across languages. Encouraging LLMs to abstain when faced with knowledge gaps is a promising strategy to reduce hallucinations in multilingual settings. Current abstention strategies for multilingual scenarios primarily rely on generating feedback in various languages using LLMs and performing self-reflection. However, these methods can be adversely impacted by inaccuracies and biases in the generated feedback. To address this, from a causal perspective, we introduce CausalAbstain, a method that helps LLMs determine whether to utilize multiple generated feedback responses and how to identify the most useful ones. Extensive experiments demonstrate that CausalAbstain effectively selects helpful feedback and enhances abstention decisions with interpretability in both native language (Causal-native) and multilingual (Causal-multi) settings, outperforming strong baselines on two benchmark datasets covering encyclopedic and commonsense knowledge QA tasks.</abstract>
<identifier type="citekey">sun-etal-2025-causalabstain</identifier>
<identifier type="doi">10.18653/v1/2025.findings-acl.723</identifier>
<location>
<url>https://aclanthology.org/2025.findings-acl.723/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>14060</start>
<end>14076</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T CausalAbstain: Enhancing Multilingual LLMs with Causal Reasoning for Trustworthy Abstention
%A Sun, Yuxi
%A Zuo, Aoqi
%A Gao, Wei
%A Ma, Jing
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F sun-etal-2025-causalabstain
%X Large Language Models (LLMs) often exhibit knowledge disparities across languages. Encouraging LLMs to abstain when faced with knowledge gaps is a promising strategy to reduce hallucinations in multilingual settings. Current abstention strategies for multilingual scenarios primarily rely on generating feedback in various languages using LLMs and performing self-reflection. However, these methods can be adversely impacted by inaccuracies and biases in the generated feedback. To address this, from a causal perspective, we introduce CausalAbstain, a method that helps LLMs determine whether to utilize multiple generated feedback responses and how to identify the most useful ones. Extensive experiments demonstrate that CausalAbstain effectively selects helpful feedback and enhances abstention decisions with interpretability in both native language (Causal-native) and multilingual (Causal-multi) settings, outperforming strong baselines on two benchmark datasets covering encyclopedic and commonsense knowledge QA tasks.
%R 10.18653/v1/2025.findings-acl.723
%U https://aclanthology.org/2025.findings-acl.723/
%U https://doi.org/10.18653/v1/2025.findings-acl.723
%P 14060-14076
Markdown (Informal)
[CausalAbstain: Enhancing Multilingual LLMs with Causal Reasoning for Trustworthy Abstention](https://aclanthology.org/2025.findings-acl.723/) (Sun et al., Findings 2025)
ACL
Yuxi Sun, Aoqi Zuo, Wei Gao, and Jing Ma. 2025. [CausalAbstain: Enhancing Multilingual LLMs with Causal Reasoning for Trustworthy Abstention](https://aclanthology.org/2025.findings-acl.723/). In *Findings of the Association for Computational Linguistics: ACL 2025*, pages 14060–14076, Vienna, Austria. Association for Computational Linguistics.