BibTeX
@inproceedings{raj-etal-2022-cross,
title = "Cross-modal Transfer Between Vision and Language for Protest Detection",
author = "Raj, Ria and
Andreasson, Kajsa and
Norlund, Tobias and
Johansson, Richard and
Lagerberg, Aron",
booktitle = "Proceedings of the 5th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.case-1.8",
doi = "10.18653/v1/2022.case-1.8",
pages = "56--60",
abstract = "Most of today{'}s systems for socio-political event detection are text-based, while an increasing amount of information published on the web is multi-modal. We seek to bridge this gap by proposing a method that utilizes existing annotated unimodal data to perform event detection in another data modality, zero-shot. Specifically, we focus on protest detection in text and images, and show that a pretrained vision-and-language alignment model (CLIP) can be leveraged towards this end. In particular, our results suggest that annotated protest text data can act supplementarily for detecting protests in images, but significant transfer is demonstrated in the opposite direction as well.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="raj-etal-2022-cross">
<titleInfo>
<title>Cross-modal Transfer Between Vision and Language for Protest Detection</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ria</namePart>
<namePart type="family">Raj</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kajsa</namePart>
<namePart type="family">Andreasson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tobias</namePart>
<namePart type="family">Norlund</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Richard</namePart>
<namePart type="family">Johansson</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aron</namePart>
<namePart type="family">Lagerberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 5th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Most of today’s systems for socio-political event detection are text-based, while an increasing amount of information published on the web is multi-modal. We seek to bridge this gap by proposing a method that utilizes existing annotated unimodal data to perform event detection in another data modality, zero-shot. Specifically, we focus on protest detection in text and images, and show that a pretrained vision-and-language alignment model (CLIP) can be leveraged towards this end. In particular, our results suggest that annotated protest text data can act supplementarily for detecting protests in images, but significant transfer is demonstrated in the opposite direction as well.</abstract>
<identifier type="citekey">raj-etal-2022-cross</identifier>
<identifier type="doi">10.18653/v1/2022.case-1.8</identifier>
<location>
<url>https://aclanthology.org/2022.case-1.8</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>56</start>
<end>60</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Cross-modal Transfer Between Vision and Language for Protest Detection
%A Raj, Ria
%A Andreasson, Kajsa
%A Norlund, Tobias
%A Johansson, Richard
%A Lagerberg, Aron
%S Proceedings of the 5th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F raj-etal-2022-cross
%X Most of today’s systems for socio-political event detection are text-based, while an increasing amount of information published on the web is multi-modal. We seek to bridge this gap by proposing a method that utilizes existing annotated unimodal data to perform event detection in another data modality, zero-shot. Specifically, we focus on protest detection in text and images, and show that a pretrained vision-and-language alignment model (CLIP) can be leveraged towards this end. In particular, our results suggest that annotated protest text data can act supplementarily for detecting protests in images, but significant transfer is demonstrated in the opposite direction as well.
%R 10.18653/v1/2022.case-1.8
%U https://aclanthology.org/2022.case-1.8
%U https://doi.org/10.18653/v1/2022.case-1.8
%P 56-60
Markdown (Informal)
[Cross-modal Transfer Between Vision and Language for Protest Detection](https://aclanthology.org/2022.case-1.8) (Raj et al., CASE 2022)
ACL
Ria Raj, Kajsa Andreasson, Tobias Norlund, Richard Johansson, and Aron Lagerberg. 2022. Cross-modal Transfer Between Vision and Language for Protest Detection. In Proceedings of the 5th Workshop on Challenges and Applications of Automated Extraction of Socio-political Events from Text (CASE), pages 56–60, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.
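
The abstract describes leveraging CLIP's shared vision-and-language embedding space so that annotated data in one modality (e.g., protest texts) can drive zero-shot detection in the other (e.g., protest images). The snippet below is a minimal, hypothetical sketch of that general idea only, assuming the Hugging Face transformers CLIP implementation and scikit-learn; the checkpoint name, example sentences, and the choice of a logistic-regression classifier are illustrative assumptions, not the authors' exact setup.

```python
# Hypothetical sketch: fit a classifier on CLIP *text* embeddings of labelled
# protest / non-protest sentences, then apply it unchanged to CLIP *image*
# embeddings (cross-modal zero-shot transfer). Illustrative only.
import torch
from PIL import Image
from sklearn.linear_model import LogisticRegression
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Annotated unimodal (text) training data -- placeholder examples.
texts = [
    "Thousands marched downtown demanding higher wages.",
    "The museum opened a new exhibition on local history.",
]
labels = [1, 0]  # 1 = protest, 0 = not protest

with torch.no_grad():
    text_inputs = processor(text=texts, return_tensors="pt", padding=True)
    text_emb = model.get_text_features(**text_inputs)
    text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)  # unit-normalise

clf = LogisticRegression().fit(text_emb.numpy(), labels)

# Zero-shot transfer: score an image with the text-trained classifier.
image = Image.open("street_scene.jpg")  # placeholder path
with torch.no_grad():
    image_inputs = processor(images=image, return_tensors="pt")
    img_emb = model.get_image_features(**image_inputs)
    img_emb = img_emb / img_emb.norm(dim=-1, keepdim=True)

print("P(protest) =", clf.predict_proba(img_emb.numpy())[0, 1])
```

Because CLIP projects texts and images into the same embedding space, a classifier trained on one modality can, under the assumptions above, be applied directly to the other; the paper reports that such transfer works in both directions, with text-to-image transfer acting supplementarily and image-to-text transfer also proving significant.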