@inproceedings{wu-etal-2025-efficient,
title = "Efficient Cross-modal Prompt Learning with Semantic Enhancement for Domain-robust Fake News Detection",
author = "Wu, Fei and
Jin, Hao and
Hu, Changhui and
Ji, Yimu and
Jing, Xiao-Yuan and
Jiang, Guo-Ping",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.coling-main.282/",
pages = "4175--4185",
abstract = "With the development of multimedia technology, online social media has become a major medium for people to access news, but meanwhile, it has also exacerbated the dissemination of multi-modal fake news. An automatic and efficient multi-modal fake news detection (MFND) method is urgently needed. Existing MFND methods usually conduct cross-modal information interaction at later stage, resulting in insufficient exploration of complementary information between modalities. Another challenge lies in the differences among news data from different domains, leading to the weak generalization ability in detecting news from various domains. In this work, we propose an efficient Cross-modal Prompt Learning with Semantic enhancement method for Domain-robust fake news detection (CPLSD). Specifically, we design an efficient cross-modal prompt interaction module, which utilizes prompt as medium to realize lightweight cross-modal information interaction in the early stage of feature extraction, enabling to exploit rich modality complementary information. We design a domain-general prompt generation module that can adaptively blend domain-specific news features to generate domain-general prompts, for improving the domain generalization ability of the model. Furthermore, an image semantic enhancement module is designed to achieve image-to-text translation, fully exploring the semantic discriminative information of the image modality. Extensive experiments conducted on three MFND benchmarks demonstrate the superiority of our proposed approach over existing state-of-the-art MFND methods."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wu-etal-2025-efficient">
<titleInfo>
<title>Efficient Cross-modal Prompt Learning with Semantic Enhancement for Domain-robust Fake News Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Fei</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hao</namePart>
<namePart type="family">Jin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Changhui</namePart>
<namePart type="family">Hu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yimu</namePart>
<namePart type="family">Ji</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiao-Yuan</namePart>
<namePart type="family">Jing</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guo-Ping</namePart>
<namePart type="family">Jiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-01</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 31st International Conference on Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Owen</namePart>
<namePart type="family">Rambow</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leo</namePart>
<namePart type="family">Wanner</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marianna</namePart>
<namePart type="family">Apidianaki</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hend</namePart>
<namePart type="family">Al-Khalifa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Barbara</namePart>
<namePart type="family">Di Eugenio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Schockaert</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, UAE</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>With the development of multimedia technology, online social media has become a major medium for people to access news, but meanwhile, it has also exacerbated the dissemination of multi-modal fake news. An automatic and efficient multi-modal fake news detection (MFND) method is urgently needed. Existing MFND methods usually conduct cross-modal information interaction at later stage, resulting in insufficient exploration of complementary information between modalities. Another challenge lies in the differences among news data from different domains, leading to the weak generalization ability in detecting news from various domains. In this work, we propose an efficient Cross-modal Prompt Learning with Semantic enhancement method for Domain-robust fake news detection (CPLSD). Specifically, we design an efficient cross-modal prompt interaction module, which utilizes prompt as medium to realize lightweight cross-modal information interaction in the early stage of feature extraction, enabling to exploit rich modality complementary information. We design a domain-general prompt generation module that can adaptively blend domain-specific news features to generate domain-general prompts, for improving the domain generalization ability of the model. Furthermore, an image semantic enhancement module is designed to achieve image-to-text translation, fully exploring the semantic discriminative information of the image modality. Extensive experiments conducted on three MFND benchmarks demonstrate the superiority of our proposed approach over existing state-of-the-art MFND methods.</abstract>
<identifier type="citekey">wu-etal-2025-efficient</identifier>
<location>
<url>https://aclanthology.org/2025.coling-main.282/</url>
</location>
<part>
<date>2025-01</date>
<extent unit="page">
<start>4175</start>
<end>4185</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Efficient Cross-modal Prompt Learning with Semantic Enhancement for Domain-robust Fake News Detection
%A Wu, Fei
%A Jin, Hao
%A Hu, Changhui
%A Ji, Yimu
%A Jing, Xiao-Yuan
%A Jiang, Guo-Ping
%Y Rambow, Owen
%Y Wanner, Leo
%Y Apidianaki, Marianna
%Y Al-Khalifa, Hend
%Y Di Eugenio, Barbara
%Y Schockaert, Steven
%S Proceedings of the 31st International Conference on Computational Linguistics
%D 2025
%8 January
%I Association for Computational Linguistics
%C Abu Dhabi, UAE
%F wu-etal-2025-efficient
%X With the development of multimedia technology, online social media has become a major medium for people to access news, but meanwhile, it has also exacerbated the dissemination of multi-modal fake news. An automatic and efficient multi-modal fake news detection (MFND) method is urgently needed. Existing MFND methods usually conduct cross-modal information interaction at later stage, resulting in insufficient exploration of complementary information between modalities. Another challenge lies in the differences among news data from different domains, leading to the weak generalization ability in detecting news from various domains. In this work, we propose an efficient Cross-modal Prompt Learning with Semantic enhancement method for Domain-robust fake news detection (CPLSD). Specifically, we design an efficient cross-modal prompt interaction module, which utilizes prompt as medium to realize lightweight cross-modal information interaction in the early stage of feature extraction, enabling to exploit rich modality complementary information. We design a domain-general prompt generation module that can adaptively blend domain-specific news features to generate domain-general prompts, for improving the domain generalization ability of the model. Furthermore, an image semantic enhancement module is designed to achieve image-to-text translation, fully exploring the semantic discriminative information of the image modality. Extensive experiments conducted on three MFND benchmarks demonstrate the superiority of our proposed approach over existing state-of-the-art MFND methods.
%U https://aclanthology.org/2025.coling-main.282/
%P 4175-4185
Markdown (Informal)
[Efficient Cross-modal Prompt Learning with Semantic Enhancement for Domain-robust Fake News Detection](https://aclanthology.org/2025.coling-main.282/) (Wu et al., COLING 2025)
ACL