BibTeX
@inproceedings{cohen-etal-2025-infact,
title = "{I}n{F}act: Informativeness Alignment for Improved {LLM} Factuality",
author = "Cohen, Roi and
Biswas, Russa and
de Melo, Gerard",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.971/",
pages = "17876--17888",
ISBN = "979-8-89176-335-7",
abstract = "Factual completeness is a general term that captures how detailed and informative a factually correct text is. For instance, the sentence ``Barack Obama was born in the United States'' is factually correct, though less informative than ``Barack Obama was born in Honolulu, Hawaii, United States''. While LLMs are well known to hallucinate and generate factually incorrect text, they may also tend to generate text that is factually correct yet less informative than other possible choices. In this work, we tackle this problem by proposing an informativeness alignment mechanism. This mechanism leverages recent factual informativeness benchmarks to define an informativeness alignment objective that prioritizes answers that are both correct and informative. We find that training a model to maximize this objective, or to optimize for it via preference optimization, improves not only informativeness but also factuality."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="cohen-etal-2025-infact">
<titleInfo>
<title>InFact: Informativeness Alignment for Improved LLM Factuality</title>
</titleInfo>
<name type="personal">
<namePart type="given">Roi</namePart>
<namePart type="family">Cohen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Russa</namePart>
<namePart type="family">Biswas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gerard</namePart>
<namePart type="family">de Melo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Factual completeness is a general term that captures how detailed and informative a factually correct text is. For instance, the sentence “Barack Obama was born in the United States” is factually correct, though less informative than “Barack Obama was born in Honolulu, Hawaii, United States”. While LLMs are well known to hallucinate and generate factually incorrect text, they may also tend to generate text that is factually correct yet less informative than other possible choices. In this work, we tackle this problem by proposing an informativeness alignment mechanism. This mechanism leverages recent factual informativeness benchmarks to define an informativeness alignment objective that prioritizes answers that are both correct and informative. We find that training a model to maximize this objective, or to optimize for it via preference optimization, improves not only informativeness but also factuality.</abstract>
<identifier type="citekey">cohen-etal-2025-infact</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.971/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>17876</start>
<end>17888</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T InFact: Informativeness Alignment for Improved LLM Factuality
%A Cohen, Roi
%A Biswas, Russa
%A de Melo, Gerard
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F cohen-etal-2025-infact
%X Factual completeness is a general term that captures how detailed and informative a factually correct text is. For instance, the sentence “Barack Obama was born in the United States” is factually correct, though less informative than “Barack Obama was born in Honolulu, Hawaii, United States”. While LLMs are well known to hallucinate and generate factually incorrect text, they may also tend to generate text that is factually correct yet less informative than other possible choices. In this work, we tackle this problem by proposing an informativeness alignment mechanism. This mechanism leverages recent factual informativeness benchmarks to define an informativeness alignment objective that prioritizes answers that are both correct and informative. We find that training a model to maximize this objective, or to optimize for it via preference optimization, improves not only informativeness but also factuality.
%U https://aclanthology.org/2025.findings-emnlp.971/
%P 17876-17888
Markdown (Informal)
[InFact: Informativeness Alignment for Improved LLM Factuality](https://aclanthology.org/2025.findings-emnlp.971/) (Cohen et al., Findings 2025)
ACL
Roi Cohen, Russa Biswas, and Gerard de Melo. 2025. InFact: Informativeness Alignment for Improved LLM Factuality. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 17876–17888, Suzhou, China. Association for Computational Linguistics.
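The abstract above describes an informativeness alignment objective that prefers answers which are both factually correct and informative, and notes that a model can be trained to maximize it or to optimize for it via preference optimization. As an illustration only, and not the paper's implementation, the following minimal Python sketch assumes a DPO-style setup and shows one hypothetical way such preference pairs could be built from candidate answers scored for correctness and informativeness. All names here (Candidate, preference_pairs, the scoring fields) are invented for this example.

```python
# Hypothetical sketch: turn scored candidate answers into (chosen, rejected)
# preference pairs that prefer responses which are both factually correct and
# more informative. The pairing rule and field names are assumptions, not the
# paper's actual objective or code.

from dataclasses import dataclass
from itertools import combinations
from typing import List, Tuple


@dataclass
class Candidate:
    text: str
    is_correct: bool          # e.g. verified against a knowledge source
    informativeness: float    # e.g. a score from a factual-informativeness benchmark


def preference_pairs(question: str, candidates: List[Candidate]) -> List[Tuple[str, str, str]]:
    """Build (question, chosen, rejected) triples.

    Assumed preference rule: any correct answer beats any incorrect one, and
    among correct answers the more informative one is preferred.
    """
    pairs = []
    for a, b in combinations(candidates, 2):
        if a.is_correct and not b.is_correct:
            pairs.append((question, a.text, b.text))
        elif b.is_correct and not a.is_correct:
            pairs.append((question, b.text, a.text))
        elif a.is_correct and b.is_correct and a.informativeness != b.informativeness:
            chosen, rejected = (a, b) if a.informativeness > b.informativeness else (b, a)
            pairs.append((question, chosen.text, rejected.text))
    return pairs


if __name__ == "__main__":
    q = "Where was Barack Obama born?"
    cands = [
        Candidate("Barack Obama was born in the United States.", True, 0.4),
        Candidate("Barack Obama was born in Honolulu, Hawaii, United States.", True, 0.9),
        Candidate("Barack Obama was born in Kenya.", False, 0.7),
    ]
    for _, chosen, rejected in preference_pairs(q, cands):
        print(f"CHOSEN:   {chosen}\nREJECTED: {rejected}\n")
```

Each (chosen, rejected) pair produced this way could feed a standard preference-optimization trainer; the actual objective and training procedure are defined in the paper itself.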