@inproceedings{zhu-etal-2024-pollmgraph,
title = "{P}o{LLM}graph: Unraveling Hallucinations in Large Language Models via State Transition Dynamics",
author = "Zhu, Derui and
Chen, Dingfan and
Li, Qing and
Chen, Zongxiong and
Ma, Lei and
Grossklags, Jens and
Fritz, Mario",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.findings-naacl.294",
doi = "10.18653/v1/2024.findings-naacl.294",
pages = "4737--4751",
abstract = "Despite tremendous advancements in large language models (LLMs) over recent years, a notably urgent challenge for their practical deployment is the phenomenon of ''$\textit{hallucination}${''}, where the model fabricates facts and produces non-factual statements. In response, we propose $\texttt{PoLLMgraph}${---}a Polygraph for LLMs{---}as an effective model-based white-box detection and forecasting approach. $\texttt{PoLLMgraph}$ distinctly differs from the large body of existing research that concentrates on addressing such challenges through black-box evaluations. In particular, we demonstrate that hallucination can be effectively detected by analyzing the LLM{'}s internal state transition dynamics during generation via tractable probabilistic models. Experimental results on various open-source LLMs confirm the efficacy of $\texttt{PoLLMgraph}$, outperforming state-of-the-art methods by a considerable margin, evidenced by over 20{\%} improvement in AUC-ROC on common benchmarking datasets like TruthfulQA. Our work paves a new way for model-based white-box analysis of LLMs, motivating the research community to further explore, understand, and refine the intricate dynamics of LLM behaviors.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhu-etal-2024-pollmgraph">
<titleInfo>
<title>PoLLMgraph: Unraveling Hallucinations in Large Language Models via State Transition Dynamics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Derui</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dingfan</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qing</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zongxiong</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lei</namePart>
<namePart type="family">Ma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jens</namePart>
<namePart type="family">Grossklags</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mario</namePart>
<namePart type="family">Fritz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: NAACL 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Duh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Helena</namePart>
<namePart type="family">Gomez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Mexico City, Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Despite tremendous advancements in large language models (LLMs) over recent years, a notably urgent challenge for their practical deployment is the phenomenon of “hallucination”, where the model fabricates facts and produces non-factual statements. In response, we propose PoLLMgraph—a Polygraph for LLMs—as an effective model-based white-box detection and forecasting approach. PoLLMgraph distinctly differs from the large body of existing research that concentrates on addressing such challenges through black-box evaluations. In particular, we demonstrate that hallucination can be effectively detected by analyzing the LLM’s internal state transition dynamics during generation via tractable probabilistic models. Experimental results on various open-source LLMs confirm the efficacy of PoLLMgraph, outperforming state-of-the-art methods by a considerable margin, evidenced by over 20% improvement in AUC-ROC on common benchmarking datasets like TruthfulQA. Our work paves a new way for model-based white-box analysis of LLMs, motivating the research community to further explore, understand, and refine the intricate dynamics of LLM behaviors.</abstract>
<identifier type="citekey">zhu-etal-2024-pollmgraph</identifier>
<identifier type="doi">10.18653/v1/2024.findings-naacl.294</identifier>
<location>
<url>https://aclanthology.org/2024.findings-naacl.294</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>4737</start>
<end>4751</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T PoLLMgraph: Unraveling Hallucinations in Large Language Models via State Transition Dynamics
%A Zhu, Derui
%A Chen, Dingfan
%A Li, Qing
%A Chen, Zongxiong
%A Ma, Lei
%A Grossklags, Jens
%A Fritz, Mario
%Y Duh, Kevin
%Y Gomez, Helena
%Y Bethard, Steven
%S Findings of the Association for Computational Linguistics: NAACL 2024
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico
%F zhu-etal-2024-pollmgraph
%X Despite tremendous advancements in large language models (LLMs) over recent years, a notably urgent challenge for their practical deployment is the phenomenon of “hallucination”, where the model fabricates facts and produces non-factual statements. In response, we propose PoLLMgraph—a Polygraph for LLMs—as an effective model-based white-box detection and forecasting approach. PoLLMgraph distinctly differs from the large body of existing research that concentrates on addressing such challenges through black-box evaluations. In particular, we demonstrate that hallucination can be effectively detected by analyzing the LLM’s internal state transition dynamics during generation via tractable probabilistic models. Experimental results on various open-source LLMs confirm the efficacy of PoLLMgraph, outperforming state-of-the-art methods by a considerable margin, evidenced by over 20% improvement in AUC-ROC on common benchmarking datasets like TruthfulQA. Our work paves a new way for model-based white-box analysis of LLMs, motivating the research community to further explore, understand, and refine the intricate dynamics of LLM behaviors.
%R 10.18653/v1/2024.findings-naacl.294
%U https://aclanthology.org/2024.findings-naacl.294
%U https://doi.org/10.18653/v1/2024.findings-naacl.294
%P 4737-4751
Markdown (Informal)
[PoLLMgraph: Unraveling Hallucinations in Large Language Models via State Transition Dynamics](https://aclanthology.org/2024.findings-naacl.294) (Zhu et al., Findings 2024)
ACL
Derui Zhu, Dingfan Chen, Qing Li, Zongxiong Chen, Lei Ma, Jens Grossklags, and Mario Fritz. 2024. PoLLMgraph: Unraveling Hallucinations in Large Language Models via State Transition Dynamics. In Findings of the Association for Computational Linguistics: NAACL 2024, pages 4737–4751, Mexico City, Mexico. Association for Computational Linguistics.