@inproceedings{alnajjar-etal-2022-ring,
title = "Ring That Bell: A Corpus and Method for Multimodal Metaphor Detection in Videos",
author = {Alnajjar, Khalid and
H{\"a}m{\"a}l{\"a}inen, Mika and
Zhang, Shuo},
editor = "Ghosh, Debanjan and
Beigman Klebanov, Beata and
Muresan, Smaranda and
Feldman, Anna and
Poria, Soujanya and
Chakrabarty, Tuhin",
booktitle = "Proceedings of the 3rd Workshop on Figurative Language Processing (FLP)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.flp-1.4/",
doi = "10.18653/v1/2022.flp-1.4",
pages = "24--33",
abstract = "We present the first openly available multimodal metaphor annotated corpus. The corpus consists of videos including audio and subtitles that have been annotated by experts. Furthermore, we present a method for detecting metaphors in the new dataset based on the textual content of the videos. The method achieves a high F1-score (62{\%}) for metaphorical labels. We also experiment with other modalities and multimodal methods; however, these methods did not out-perform the text-based model. In our error analysis, we do identify that there are cases where video could help in disambiguating metaphors, however, the visual cues are too subtle for our model to capture. The data is available on Zenodo."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="alnajjar-etal-2022-ring">
<titleInfo>
<title>Ring That Bell: A Corpus and Method for Multimodal Metaphor Detection in Videos</title>
</titleInfo>
<name type="personal">
<namePart type="given">Khalid</namePart>
<namePart type="family">Alnajjar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mika</namePart>
<namePart type="family">Hämäläinen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shuo</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Figurative Language Processing (FLP)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Debanjan</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Beata</namePart>
<namePart type="family">Beigman Klebanov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Smaranda</namePart>
<namePart type="family">Muresan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anna</namePart>
<namePart type="family">Feldman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Soujanya</namePart>
<namePart type="family">Poria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tuhin</namePart>
<namePart type="family">Chakrabarty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>We present the first openly available multimodal metaphor-annotated corpus. The corpus consists of videos, including audio and subtitles, that have been annotated by experts. Furthermore, we present a method for detecting metaphors in the new dataset based on the textual content of the videos. The method achieves a high F1-score (62%) for metaphorical labels. We also experiment with other modalities and multimodal methods; however, these methods did not outperform the text-based model. In our error analysis, we identify cases where video could help in disambiguating metaphors; however, the visual cues are too subtle for our model to capture. The data is available on Zenodo.</abstract>
<identifier type="citekey">alnajjar-etal-2022-ring</identifier>
<identifier type="doi">10.18653/v1/2022.flp-1.4</identifier>
<location>
<url>https://aclanthology.org/2022.flp-1.4/</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>24</start>
<end>33</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Ring That Bell: A Corpus and Method for Multimodal Metaphor Detection in Videos
%A Alnajjar, Khalid
%A Hämäläinen, Mika
%A Zhang, Shuo
%Y Ghosh, Debanjan
%Y Beigman Klebanov, Beata
%Y Muresan, Smaranda
%Y Feldman, Anna
%Y Poria, Soujanya
%Y Chakrabarty, Tuhin
%S Proceedings of the 3rd Workshop on Figurative Language Processing (FLP)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F alnajjar-etal-2022-ring
%X We present the first openly available multimodal metaphor-annotated corpus. The corpus consists of videos, including audio and subtitles, that have been annotated by experts. Furthermore, we present a method for detecting metaphors in the new dataset based on the textual content of the videos. The method achieves a high F1-score (62%) for metaphorical labels. We also experiment with other modalities and multimodal methods; however, these methods did not outperform the text-based model. In our error analysis, we identify cases where video could help in disambiguating metaphors; however, the visual cues are too subtle for our model to capture. The data is available on Zenodo.
%R 10.18653/v1/2022.flp-1.4
%U https://aclanthology.org/2022.flp-1.4/
%U https://doi.org/10.18653/v1/2022.flp-1.4
%P 24-33
Markdown (Informal)
[Ring That Bell: A Corpus and Method for Multimodal Metaphor Detection in Videos](https://aclanthology.org/2022.flp-1.4/) (Alnajjar et al., Fig-Lang 2022)
ACL
Khalid Alnajjar, Mika Hämäläinen, and Shuo Zhang. 2022. Ring That Bell: A Corpus and Method for Multimodal Metaphor Detection in Videos. In Proceedings of the 3rd Workshop on Figurative Language Processing (FLP), pages 24–33, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.