@inproceedings{braverman-etal-2025-decepbench,
  title     = {{DecepBench}: Benchmarking Multimodal Deception Detection},
  author    = {Braverman, Ethan and
               Maganti, Vittesh and
               Lalye, Nysa and
               Ganti, Akhil and
               Lu, Michael and
               Zhu, Kevin and
               Sharma, Vasu and
               O{'}Brien, Sean},
  editor    = {Hale, James and
               Kwon, Brian Deuksin and
               Dutt, Ritam},
  booktitle = {Proceedings of the Third Workshop on Social Influence in Conversations (SICon 2025)},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.sicon-1.3/},
  doi       = {10.18653/v1/2025.sicon-1.3},
  pages     = {50--64},
  isbn      = {979-8-89176-266-4},
  abstract  = {Deception detection is crucial in domains such as security, forensics, and legal proceedings, as well as to ensure the reliability of AI systems. However, current approaches are limited by the lack of generalizable and interpretable benchmarks built on large and diverse datasets. To address this gap, we introduce DecepBench, a comprehensive and robust benchmark for multimodal deception detection. DecepBench includes an enhanced version of the DOLOS dataset, the largest game-show deception dataset (1,700 labeled video clips with audio). We augment each video clip with transcripts, introducing a third modality (text) and incorporating deception-related features identified in psychological research. We employ explainable methods to evaluate the relevance of key deception cues, providing insights into model limitations and guiding future improvements. Our enhancements to DOLOS, combined with these interpretable analyses, yield improved performance and a deeper understanding of multimodal deception detection.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="braverman-etal-2025-decepbench">
<titleInfo>
<title>DecepBench: Benchmarking Multimodal Deception Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ethan</namePart>
<namePart type="family">Braverman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vittesh</namePart>
<namePart type="family">Maganti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nysa</namePart>
<namePart type="family">Lalye</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Akhil</namePart>
<namePart type="family">Ganti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Lu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kevin</namePart>
<namePart type="family">Zhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vasu</namePart>
<namePart type="family">Sharma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sean</namePart>
<namePart type="family">O’Brien</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Workshop on Social Influence in Conversations (SICon 2025)</title>
</titleInfo>
<name type="personal">
<namePart type="given">James</namePart>
<namePart type="family">Hale</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Brian</namePart>
<namePart type="given">Deuksin</namePart>
<namePart type="family">Kwon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ritam</namePart>
<namePart type="family">Dutt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-266-4</identifier>
</relatedItem>
<abstract>Deception detection is crucial in domains such as security, forensics, and legal proceedings, as well as to ensure the reliability of AI systems. However, current approaches are limited by the lack of generalizable and interpretable benchmarks built on large and diverse datasets. To address this gap, we introduce DecepBench, a comprehensive and robust benchmark for multimodal deception detection. DecepBench includes an enhanced version of the DOLOS dataset, the largest game-show deception dataset (1,700 labeled video clips with audio). We augment each video clip with transcripts, introducing a third modality (text) and incorporating deception-related features identified in psychological research. We employ explainable methods to evaluate the relevance of key deception cues, providing insights into model limitations and guiding future improvements. Our enhancements to DOLOS, combined with these interpretable analyses, yield improved performance and a deeper understanding of multimodal deception detection.</abstract>
<identifier type="citekey">braverman-etal-2025-decepbench</identifier>
<identifier type="doi">10.18653/v1/2025.sicon-1.3</identifier>
<location>
<url>https://aclanthology.org/2025.sicon-1.3/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>50</start>
<end>64</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T DecepBench: Benchmarking Multimodal Deception Detection
%A Braverman, Ethan
%A Maganti, Vittesh
%A Lalye, Nysa
%A Ganti, Akhil
%A Lu, Michael
%A Zhu, Kevin
%A Sharma, Vasu
%A O’Brien, Sean
%Y Hale, James
%Y Kwon, Brian Deuksin
%Y Dutt, Ritam
%S Proceedings of the Third Workshop on Social Influence in Conversations (SICon 2025)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-266-4
%F braverman-etal-2025-decepbench
%X Deception detection is crucial in domains such as security, forensics, and legal proceedings, as well as to ensure the reliability of AI systems. However, current approaches are limited by the lack of generalizable and interpretable benchmarks built on large and diverse datasets. To address this gap, we introduce DecepBench, a comprehensive and robust benchmark for multimodal deception detection. DecepBench includes an enhanced version of the DOLOS dataset, the largest game-show deception dataset (1,700 labeled video clips with audio). We augment each video clip with transcripts, introducing a third modality (text) and incorporating deception-related features identified in psychological research. We employ explainable methods to evaluate the relevance of key deception cues, providing insights into model limitations and guiding future improvements. Our enhancements to DOLOS, combined with these interpretable analyses, yield improved performance and a deeper understanding of multimodal deception detection.
%R 10.18653/v1/2025.sicon-1.3
%U https://aclanthology.org/2025.sicon-1.3/
%U https://doi.org/10.18653/v1/2025.sicon-1.3
%P 50-64
Markdown (Informal)
[DecepBench: Benchmarking Multimodal Deception Detection](https://aclanthology.org/2025.sicon-1.3/) (Braverman et al., SICon 2025)
ACL
- Ethan Braverman, Vittesh Maganti, Nysa Lalye, Akhil Ganti, Michael Lu, Kevin Zhu, Vasu Sharma, and Sean O’Brien. 2025. DecepBench: Benchmarking Multimodal Deception Detection. In Proceedings of the Third Workshop on Social Influence in Conversations (SICon 2025), pages 50–64, Vienna, Austria. Association for Computational Linguistics.