BibTeX
@inproceedings{wan-etal-2024-evidence,
title = "What Evidence Do Language Models Find Convincing?",
author = "Wan, Alexander and
Wallace, Eric and
Klein, Dan",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.luhme-long.403/",
doi = "10.18653/v1/2024.acl-long.403",
pages = "7468--7484",
abstract = "Retrieval-augmented language models are being increasingly tasked with subjective, contentious, and conflicting queries such as {\textquotedblleft}is aspartame linked to cancer{\textquotedblright}. To resolve these ambiguous queries, one must search through a large range of websites and consider {\textquotedblleft}which, if any, of this evidence do I find convincing?{\textquotedblright}. In this work, we study how LLMs answer this question. In particular, we construct ConflictingQA, a dataset that pairs controversial queries with a series of real-world evidence documents that contain different facts (e.g., quantitative results), argument styles (e.g., appeals to authority), and answers (Yes or No). We use this dataset to perform sensitivity and counterfactual analyses to explore which text features most affect LLM predictions. Overall, we find that current models rely heavily on the relevance of a website to the query, while largely ignoring stylistic features that humans find important such as whether a text contains scientific references or is written with a neutral tone. Taken together, these results highlight the importance of RAG corpus quality (e.g., the need to filter misinformation), and possibly even a shift in how LLMs are trained to better align with human judgements."
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wan-etal-2024-evidence">
<titleInfo>
<title>What Evidence Do Language Models Find Convincing?</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="family">Wan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eric</namePart>
<namePart type="family">Wallace</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dan</namePart>
<namePart type="family">Klein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andre</namePart>
<namePart type="family">Martins</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivek</namePart>
<namePart type="family">Srikumar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Retrieval-augmented language models are being increasingly tasked with subjective, contentious, and conflicting queries such as “is aspartame linked to cancer”. To resolve these ambiguous queries, one must search through a large range of websites and consider “which, if any, of this evidence do I find convincing?”. In this work, we study how LLMs answer this question. In particular, we construct ConflictingQA, a dataset that pairs controversial queries with a series of real-world evidence documents that contain different facts (e.g., quantitative results), argument styles (e.g., appeals to authority), and answers (Yes or No). We use this dataset to perform sensitivity and counterfactual analyses to explore which text features most affect LLM predictions. Overall, we find that current models rely heavily on the relevance of a website to the query, while largely ignoring stylistic features that humans find important such as whether a text contains scientific references or is written with a neutral tone. Taken together, these results highlight the importance of RAG corpus quality (e.g., the need to filter misinformation), and possibly even a shift in how LLMs are trained to better align with human judgements.</abstract>
<identifier type="citekey">wan-etal-2024-evidence</identifier>
<identifier type="doi">10.18653/v1/2024.acl-long.403</identifier>
<location>
<url>https://aclanthology.org/2024.luhme-long.403/</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>7468</start>
<end>7484</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T What Evidence Do Language Models Find Convincing?
%A Wan, Alexander
%A Wallace, Eric
%A Klein, Dan
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F wan-etal-2024-evidence
%X Retrieval-augmented language models are being increasingly tasked with subjective, contentious, and conflicting queries such as “is aspartame linked to cancer”. To resolve these ambiguous queries, one must search through a large range of websites and consider “which, if any, of this evidence do I find convincing?”. In this work, we study how LLMs answer this question. In particular, we construct ConflictingQA, a dataset that pairs controversial queries with a series of real-world evidence documents that contain different facts (e.g., quantitative results), argument styles (e.g., appeals to authority), and answers (Yes or No). We use this dataset to perform sensitivity and counterfactual analyses to explore which text features most affect LLM predictions. Overall, we find that current models rely heavily on the relevance of a website to the query, while largely ignoring stylistic features that humans find important such as whether a text contains scientific references or is written with a neutral tone. Taken together, these results highlight the importance of RAG corpus quality (e.g., the need to filter misinformation), and possibly even a shift in how LLMs are trained to better align with human judgements.
%R 10.18653/v1/2024.acl-long.403
%U https://aclanthology.org/2024.luhme-long.403/
%U https://doi.org/10.18653/v1/2024.acl-long.403
%P 7468-7484
Markdown (Informal)
[What Evidence Do Language Models Find Convincing?](https://aclanthology.org/2024.luhme-long.403/) (Wan et al., ACL 2024)
ACL
- Alexander Wan, Eric Wallace, and Dan Klein. 2024. What Evidence Do Language Models Find Convincing? In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7468–7484, Bangkok, Thailand. Association for Computational Linguistics.