@inproceedings{chockkalingam-etal-2025-go,
title = "Should {I} go vegan: Evaluating the Persuasiveness of {LLM}s in Persona-Grounded Dialogues",
author = "Chockkalingam, Shruthi and
Hossein Alavi, Seyed and
T. Ng, Raymond and
Shwartz, Vered",
editor = "Hale, James and
Kwon, Brian Deuksin and
Dutt, Ritam",
booktitle = "Proceedings of the Third Workshop on Social Influence in Conversations (SICon 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.sicon-1.4/",
doi = "10.18653/v1/2025.sicon-1.4",
pages = "65--72",
ISBN = "979-8-89176-266-4",
abstract = "As the use of large language models becomes ever more prevalent, understanding their persuasive abilities, both in ways that can be beneficial and harmful to humans, proves an important task. Previous work has focused on persuasion in the context of negotiations, political debate and advertising. We instead shift the focus to a more realistic setup of a dialogue between a persuadee with an everyday dilemma (e.g., whether to switch to a vegan diet or not) and a persuader with no prior knowledge about the persuadee who is trying to persuade them towards a certain decision based on arguments they feel would be most suited to the persuadee{'}s persona. We collect and analyze conversations between a human persuadee and either a human persuader or an LLM persuader based on GPT-4. We find that, in this setting, GPT-4 is perceived as both more persuasive and more empathetic, whereas humans are more skilled at discovering new information about the person they are speaking to. This research provides the groundwork for future work predicting the persuasiveness of utterances in conversation across a range of topics."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chockkalingam-etal-2025-go">
<titleInfo>
<title>Should I go vegan: Evaluating the Persuasiveness of LLMs in Persona-Grounded Dialogues</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shruthi</namePart>
<namePart type="family">Chockkalingam</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Seyed</namePart>
<namePart type="family">Hossein Alavi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Raymond</namePart>
<namePart type="family">T. Ng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vered</namePart>
<namePart type="family">Shwartz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Workshop on Social Influence in Conversations (SICon 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">James</namePart>
<namePart type="family">Hale</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Brian</namePart>
<namePart type="given">Deuksin</namePart>
<namePart type="family">Kwon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ritam</namePart>
<namePart type="family">Dutt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-266-4</identifier>
</relatedItem>
<abstract>As the use of large language models becomes ever more prevalent, understanding their persuasive abilities, both in ways that can be beneficial and harmful to humans, proves an important task. Previous work has focused on persuasion in the context of negotiations, political debate and advertising. We instead shift the focus to a more realistic setup of a dialogue between a persuadee with an everyday dilemma (e.g., whether to switch to a vegan diet or not) and a persuader with no prior knowledge about the persuadee who is trying to persuade them towards a certain decision based on arguments they feel would be most suited to the persuadee’s persona. We collect and analyze conversations between a human persuadee and either a human persuader or an LLM persuader based on GPT-4. We find that, in this setting, GPT-4 is perceived as both more persuasive and more empathetic, whereas humans are more skilled at discovering new information about the person they are speaking to. This research provides the groundwork for future work predicting the persuasiveness of utterances in conversation across a range of topics.</abstract>
<identifier type="citekey">chockkalingam-etal-2025-go</identifier>
<identifier type="doi">10.18653/v1/2025.sicon-1.4</identifier>
<location>
<url>https://aclanthology.org/2025.sicon-1.4/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>65</start>
<end>72</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Should I go vegan: Evaluating the Persuasiveness of LLMs in Persona-Grounded Dialogues
%A Chockkalingam, Shruthi
%A Hossein Alavi, Seyed
%A T. Ng, Raymond
%A Shwartz, Vered
%Y Hale, James
%Y Kwon, Brian Deuksin
%Y Dutt, Ritam
%S Proceedings of the Third Workshop on Social Influence in Conversations (SICon 2025)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-266-4
%F chockkalingam-etal-2025-go
%X As the use of large language models becomes ever more prevalent, understanding their persuasive abilities, both in ways that can be beneficial and harmful to humans, proves an important task. Previous work has focused on persuasion in the context of negotiations, political debate and advertising. We instead shift the focus to a more realistic setup of a dialogue between a persuadee with an everyday dilemma (e.g., whether to switch to a vegan diet or not) and a persuader with no prior knowledge about the persuadee who is trying to persuade them towards a certain decision based on arguments they feel would be most suited to the persuadee’s persona. We collect and analyze conversations between a human persuadee and either a human persuader or an LLM persuader based on GPT-4. We find that, in this setting, GPT-4 is perceived as both more persuasive and more empathetic, whereas humans are more skilled at discovering new information about the person they are speaking to. This research provides the groundwork for future work predicting the persuasiveness of utterances in conversation across a range of topics.
%R 10.18653/v1/2025.sicon-1.4
%U https://aclanthology.org/2025.sicon-1.4/
%U https://doi.org/10.18653/v1/2025.sicon-1.4
%P 65-72
Markdown (Informal)
[Should I go vegan: Evaluating the Persuasiveness of LLMs in Persona-Grounded Dialogues](https://aclanthology.org/2025.sicon-1.4/) (Chockkalingam et al., SICon 2025)
ACL