@inproceedings{yang-etal-2025-misinfobench,
title = "{M}isinfo{B}ench: A Multi-Dimensional Benchmark for Evaluating {LLM}s' Resilience to Misinformation",
author = "Yang, Ye and
Li, Donghe and
Li, Zuchen and
Li, Fengyuan and
Liu, Jingyi and
Sun, Li and
Yang, Qingyu",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.540/",
pages = "10202--10229",
ISBN = "979-8-89176-335-7",
abstract = "Large Language Models (LLMs) excel in various Natural Language Processing (NLP) tasks but remain vulnerable to misinformation, particularly in multi-turn dialogues where misleading context accumulates. Existing benchmarks, such as TruthfulQA and FEVER, assess factual accuracy in isolated queries but fail to evaluate LLMs' resilience to misinformation in interactive settings. To address this limitation, we introduce MisinfoBench, a multi-dimensional benchmark designed to assess LLMs' ability to discern, resist, and reject misinformation. MisinfoBench defines three core dimensions{---}Discernment, Resistance, and Principled Refusal{---}across seven evaluation tasks, systematically testing misinformation identification, contextual resistance, and the rejection of coercive false premises. It includes a dataset of 4,962 multi-turn dialogues and 2,000 misinformation-based question-answer pairs, capturing diverse misinformation scenarios. We evaluate 16 LLMs, revealing substantial disparities in misinformation resilience: proprietary models outperform open-source counterparts, while multi-turn dialogues and cross-lingual settings exacerbate misinformation susceptibility. Our findings highlight persistent vulnerabilities in LLMs' misinformation defenses, emphasizing the need for context-aware training, adversarial robustness, and principled reasoning. MisinfoBench establishes a rigorous standard for evaluating misinformation resilience, advancing the development of more trustworthy AI systems."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yang-etal-2025-misinfobench">
<titleInfo>
<title>MisinfoBench: A Multi-Dimensional Benchmark for Evaluating LLMs’ Resilience to Misinformation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ye</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Donghe</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zuchen</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fengyuan</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jingyi</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Li</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qingyu</namePart>
<namePart type="family">Yang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Large Language Models (LLMs) excel in various Natural Language Processing (NLP) tasks but remain vulnerable to misinformation, particularly in multi-turn dialogues where misleading context accumulates. Existing benchmarks, such as TruthfulQA and FEVER, assess factual accuracy in isolated queries but fail to evaluate LLMs’ resilience to misinformation in interactive settings. To address this limitation, we introduce MisinfoBench, a multi-dimensional benchmark designed to assess LLMs’ ability to discern, resist, and reject misinformation. MisinfoBench defines three core dimensions—Discernment, Resistance, and Principled Refusal—across seven evaluation tasks, systematically testing misinformation identification, contextual resistance, and the rejection of coercive false premises. It includes a dataset of 4,962 multi-turn dialogues and 2,000 misinformation-based question-answer pairs, capturing diverse misinformation scenarios. We evaluate 16 LLMs, revealing substantial disparities in misinformation resilience: proprietary models outperform open-source counterparts, while multi-turn dialogues and cross-lingual settings exacerbate misinformation susceptibility. Our findings highlight persistent vulnerabilities in LLMs’ misinformation defenses, emphasizing the need for context-aware training, adversarial robustness, and principled reasoning. MisinfoBench establishes a rigorous standard for evaluating misinformation resilience, advancing the development of more trustworthy AI systems.</abstract>
<identifier type="citekey">yang-etal-2025-misinfobench</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.540/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>10202</start>
<end>10229</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T MisinfoBench: A Multi-Dimensional Benchmark for Evaluating LLMs’ Resilience to Misinformation
%A Yang, Ye
%A Li, Donghe
%A Li, Zuchen
%A Li, Fengyuan
%A Liu, Jingyi
%A Sun, Li
%A Yang, Qingyu
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F yang-etal-2025-misinfobench
%X Large Language Models (LLMs) excel in various Natural Language Processing (NLP) tasks but remain vulnerable to misinformation, particularly in multi-turn dialogues where misleading context accumulates. Existing benchmarks, such as TruthfulQA and FEVER, assess factual accuracy in isolated queries but fail to evaluate LLMs’ resilience to misinformation in interactive settings. To address this limitation, we introduce MisinfoBench, a multi-dimensional benchmark designed to assess LLMs’ ability to discern, resist, and reject misinformation. MisinfoBench defines three core dimensions—Discernment, Resistance, and Principled Refusal—across seven evaluation tasks, systematically testing misinformation identification, contextual resistance, and the rejection of coercive false premises. It includes a dataset of 4,962 multi-turn dialogues and 2,000 misinformation-based question-answer pairs, capturing diverse misinformation scenarios. We evaluate 16 LLMs, revealing substantial disparities in misinformation resilience: proprietary models outperform open-source counterparts, while multi-turn dialogues and cross-lingual settings exacerbate misinformation susceptibility. Our findings highlight persistent vulnerabilities in LLMs’ misinformation defenses, emphasizing the need for context-aware training, adversarial robustness, and principled reasoning. MisinfoBench establishes a rigorous standard for evaluating misinformation resilience, advancing the development of more trustworthy AI systems.
%U https://aclanthology.org/2025.findings-emnlp.540/
%P 10202-10229
Markdown (Informal)
[MisinfoBench: A Multi-Dimensional Benchmark for Evaluating LLMs’ Resilience to Misinformation](https://aclanthology.org/2025.findings-emnlp.540/) (Yang et al., Findings 2025)
ACL
Ye Yang, Donghe Li, Zuchen Li, Fengyuan Li, Jingyi Liu, Li Sun, and Qingyu Yang. 2025. MisinfoBench: A Multi-Dimensional Benchmark for Evaluating LLMs’ Resilience to Misinformation. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 10202–10229, Suzhou, China. Association for Computational Linguistics.