@inproceedings{papantoniou-etal-2025-evaluating,
  title     = {Evaluating {LLMs} on Deceptive Text across Cultures},
  author    = {Papantoniou, Katerina and
               Papadakos, Panagiotis and
               Plexousakis, Dimitris},
  editor    = {Angelova, Galia and
               Kunilovskaya, Maria and
               Escribe, Marie and
               Mitkov, Ruslan},
  booktitle = {Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative {AI} Era},
  month     = sep,
  year      = {2025},
  address   = {Varna, Bulgaria},
  publisher = {INCOMA Ltd., Shoumen, Bulgaria},
  url       = {https://aclanthology.org/2025.ranlp-1.101/},
  pages     = {884--893},
  abstract  = {Deception is a pervasive feature of human communication, yet identifying linguistic cues of deception remains a challenging task due to strong context dependency across domains, cultures, and types of deception. While prior work has relied on human analysis across disciplines like social psychology, philosophy, and political science, large language models (LLMs) offer a new avenue for exploring deception due to their strong performance in Natural Language Processing (NLP) tasks. In this study, we investigate whether open-weight LLMs possess and can apply knowledge about linguistic markers of deception across multiple languages, domains, and cultural contexts, with language and country of origin used as a proxy for culture. We focus on two domains, opinionated reviews and personal descriptions about sensitive topics, spanning five languages and six cultural settings. Using various configurations (zero-shot, one-shot, and fine-tuning), we evaluate the performance of LLMs in detecting and generating deceptive text. In detection tasks, our results reveal cross-model and cross-context performance differences. In generation tasks, linguistic analyses show partial alignment with known deception cues in human text, though this knowledge appears largely uniform and context-agnostic.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="papantoniou-etal-2025-evaluating">
<titleInfo>
<title>Evaluating LLMs on Deceptive Text across Cultures</title>
</titleInfo>
<name type="personal">
<namePart type="given">Katerina</namePart>
<namePart type="family">Papantoniou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Panagiotis</namePart>
<namePart type="family">Papadakos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dimitris</namePart>
<namePart type="family">Plexousakis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era</title>
</titleInfo>
<name type="personal">
<namePart type="given">Galia</namePart>
<namePart type="family">Angelova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maria</namePart>
<namePart type="family">Kunilovskaya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marie</namePart>
<namePart type="family">Escribe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ruslan</namePart>
<namePart type="family">Mitkov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd., Shoumen, Bulgaria</publisher>
<place>
<placeTerm type="text">Varna, Bulgaria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Deception is a pervasive feature of human communication, yet identifying linguistic cues of deception remains a challenging task due to strong context dependency across domains, cultures, and types of deception. While prior work has relied on human analysis across disciplines like social psychology, philosophy, and political science, large language models (LLMs) offer a new avenue for exploring deception due to their strong performance in Natural Language Processing (NLP) tasks. In this study, we investigate whether open-weight LLMs possess and can apply knowledge about linguistic markers of deception across multiple languages, domains, and cultural contexts, with language and country of origin used as a proxy for culture. We focus on two domains, opinionated reviews and personal descriptions about sensitive topics, spanning five languages and six cultural settings. Using various configurations (zero-shot, one-shot, and fine-tuning), we evaluate the performance of LLMs in detecting and generating deceptive text. In detection tasks, our results reveal cross-model and cross-context performance differences. In generation tasks, linguistic analyses show partial alignment with known deception cues in human text, though this knowledge appears largely uniform and context-agnostic.</abstract>
<identifier type="citekey">papantoniou-etal-2025-evaluating</identifier>
<location>
<url>https://aclanthology.org/2025.ranlp-1.101/</url>
</location>
<part>
<date>2025-09</date>
<extent unit="page">
<start>884</start>
<end>893</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Evaluating LLMs on Deceptive Text across Cultures
%A Papantoniou, Katerina
%A Papadakos, Panagiotis
%A Plexousakis, Dimitris
%Y Angelova, Galia
%Y Kunilovskaya, Maria
%Y Escribe, Marie
%Y Mitkov, Ruslan
%S Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era
%D 2025
%8 September
%I INCOMA Ltd., Shoumen, Bulgaria
%C Varna, Bulgaria
%F papantoniou-etal-2025-evaluating
%X Deception is a pervasive feature of human communication, yet identifying linguistic cues of deception remains a challenging task due to strong context dependency across domains, cultures, and types of deception. While prior work has relied on human analysis across disciplines like social psychology, philosophy, and political science, large language models (LLMs) offer a new avenue for exploring deception due to their strong performance in Natural Language Processing (NLP) tasks. In this study, we investigate whether open-weight LLMs possess and can apply knowledge about linguistic markers of deception across multiple languages, domains, and cultural contexts, with language and country of origin used as a proxy for culture. We focus on two domains, opinionated reviews and personal descriptions about sensitive topics, spanning five languages and six cultural settings. Using various configurations (zero-shot, one-shot, and fine-tuning), we evaluate the performance of LLMs in detecting and generating deceptive text. In detection tasks, our results reveal cross-model and cross-context performance differences. In generation tasks, linguistic analyses show partial alignment with known deception cues in human text, though this knowledge appears largely uniform and context-agnostic.
%U https://aclanthology.org/2025.ranlp-1.101/
%P 884-893
Markdown (Informal)
[Evaluating LLMs on Deceptive Text across Cultures](https://aclanthology.org/2025.ranlp-1.101/) (Papantoniou et al., RANLP 2025)
ACL
- Katerina Papantoniou, Panagiotis Papadakos, and Dimitris Plexousakis. 2025. Evaluating LLMs on Deceptive Text across Cultures. In Proceedings of the 15th International Conference on Recent Advances in Natural Language Processing - Natural Language Processing in the Generative AI Era, pages 884–893, Varna, Bulgaria. INCOMA Ltd., Shoumen, Bulgaria.