@inproceedings{wu-etal-2024-interpretable,
  title     = {Interpretable Short Video Rumor Detection Based on Modality Tampering},
  author    = {Wu, Kaixuan and
               Lin, Yanghao and
               Cao, Donglin and
               Lin, Dazhen},
  editor    = {Calzolari, Nicoletta and
               Kan, Min-Yen and
               Hoste, Veronique and
               Lenci, Alessandro and
               Sakti, Sakriani and
               Xue, Nianwen},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
  month     = may,
  year      = {2024},
  address   = {Torino, Italia},
  publisher = {ELRA and ICCL},
  url       = {https://aclanthology.org/2024.lrec-main.804},
  pages     = {9180--9189},
  abstract  = {With the rapid development of social media and short video applications in recent years, browsing short videos has become the norm. Due to its large user base and unique appeal, spreading rumors via short videos has become a severe social problem. Many methods simply fuse multimodal features for rumor detection, which lack interpretability. For short video rumors, rumor makers create rumors by modifying and/or splicing different modal information, so we should consider how to detect rumors from the perspective of modality tampering. Inspired by cross-modal contrastive learning, we propose a novel short video rumor detection framework by designing two pretraining tasks: modality tampering detection and inter-modal matching, imbuing the model with the ability to detect modality tampering and employing it for downstream rumor detection tasks. In addition, we design an interpretability mechanism to make the rumor detection results more reasonable by backtracking the model{'}s decision-making process. The experimental results show that the method on the short video rumor dataset has an improvement of about 4.6{\%}-12{\%} in macro-F1 compared with other models and can explain whether the short video is a rumor or not through the perspective of modality tampering.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wu-etal-2024-interpretable">
<titleInfo>
<title>Interpretable Short Video Rumor Detection Based on Modality Tampering</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kaixuan</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yanghao</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Donglin</namePart>
<namePart type="family">Cao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dazhen</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nicoletta</namePart>
<namePart type="family">Calzolari</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Min-Yen</namePart>
<namePart type="family">Kan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Veronique</namePart>
<namePart type="family">Hoste</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alessandro</namePart>
<namePart type="family">Lenci</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sakriani</namePart>
<namePart type="family">Sakti</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nianwen</namePart>
<namePart type="family">Xue</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>ELRA and ICCL</publisher>
<place>
<placeTerm type="text">Torino, Italia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>With the rapid development of social media and short video applications in recent years, browsing short videos has become the norm. Due to its large user base and unique appeal, spreading rumors via short videos has become a severe social problem. Many methods simply fuse multimodal features for rumor detection, which lack interpretability. For short video rumors, rumor makers create rumors by modifying and/or splicing different modal information, so we should consider how to detect rumors from the perspective of modality tampering. Inspired by cross-modal contrastive learning, we propose a novel short video rumor detection framework by designing two pretraining tasks: modality tampering detection and inter-modal matching, imbuing the model with the ability to detect modality tampering and employing it for downstream rumor detection tasks. In addition, we design an interpretability mechanism to make the rumor detection results more reasonable by backtracking the model’s decision-making process. The experimental results show that the method on the short video rumor dataset has an improvement of about 4.6%-12% in macro-F1 compared with other models and can explain whether the short video is a rumor or not through the perspective of modality tampering.</abstract>
<identifier type="citekey">wu-etal-2024-interpretable</identifier>
<location>
<url>https://aclanthology.org/2024.lrec-main.804</url>
</location>
<part>
<date>2024-05</date>
<extent unit="page">
<start>9180</start>
<end>9189</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Interpretable Short Video Rumor Detection Based on Modality Tampering
%A Wu, Kaixuan
%A Lin, Yanghao
%A Cao, Donglin
%A Lin, Dazhen
%Y Calzolari, Nicoletta
%Y Kan, Min-Yen
%Y Hoste, Veronique
%Y Lenci, Alessandro
%Y Sakti, Sakriani
%Y Xue, Nianwen
%S Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F wu-etal-2024-interpretable
%X With the rapid development of social media and short video applications in recent years, browsing short videos has become the norm. Due to its large user base and unique appeal, spreading rumors via short videos has become a severe social problem. Many methods simply fuse multimodal features for rumor detection, which lack interpretability. For short video rumors, rumor makers create rumors by modifying and/or splicing different modal information, so we should consider how to detect rumors from the perspective of modality tampering. Inspired by cross-modal contrastive learning, we propose a novel short video rumor detection framework by designing two pretraining tasks: modality tampering detection and inter-modal matching, imbuing the model with the ability to detect modality tampering and employing it for downstream rumor detection tasks. In addition, we design an interpretability mechanism to make the rumor detection results more reasonable by backtracking the model’s decision-making process. The experimental results show that the method on the short video rumor dataset has an improvement of about 4.6%-12% in macro-F1 compared with other models and can explain whether the short video is a rumor or not through the perspective of modality tampering.
%U https://aclanthology.org/2024.lrec-main.804
%P 9180-9189
Markdown (Informal)
[Interpretable Short Video Rumor Detection Based on Modality Tampering](https://aclanthology.org/2024.lrec-main.804) (Wu et al., LREC-COLING 2024)
ACL