@inproceedings{kulkarni-etal-2024-report,
title = "A Report on the {F}ig{L}ang 2024 Shared Task on Multimodal Figurative Language",
author = "Kulkarni, Shreyas and
Saakyan, Arkadiy and
Chakrabarty, Tuhin and
Muresan, Smaranda",
editor = "Ghosh, Debanjan and
Muresan, Smaranda and
Feldman, Anna and
Chakrabarty, Tuhin and
Liu, Emmy",
booktitle = "Proceedings of the 4th Workshop on Figurative Language Processing (FigLang 2024)",
month = jun,
year = "2024",
address = "Mexico City, Mexico (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.figlang-1.16",
doi = "10.18653/v1/2024.figlang-1.16",
pages = "115--119",
abstract = "We present the outcomes of the Multimodal Figurative Language Shared Task held at the 4th Workshop on Figurative Language Processing (FigLang 2024) co-located at NAACL 2024. The task utilized the V-FLUTE dataset which is comprised of $<$image, text$>$ pairs that use figurative language and includes detailed textual explanations for the entailment or contradiction relationship of each pair. The challenge for participants was to develop models capable of accurately identifying the visual entailment relationship in these multimodal instances and generating persuasive free-text explanations. The results showed that the participants{'} models significantly outperformed the initial baselines in both automated and human evaluations. We also provide an overview of the systems submitted and analyze the results of the evaluations. All participating systems outperformed the LLaVA-ZS baseline, provided by us in F1-score.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="kulkarni-etal-2024-report">
    <titleInfo>
      <title>A Report on the FigLang 2024 Shared Task on Multimodal Figurative Language</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Shreyas</namePart>
      <namePart type="family">Kulkarni</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Arkadiy</namePart>
      <namePart type="family">Saakyan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tuhin</namePart>
      <namePart type="family">Chakrabarty</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Smaranda</namePart>
      <namePart type="family">Muresan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 4th Workshop on Figurative Language Processing (FigLang 2024)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Debanjan</namePart>
        <namePart type="family">Ghosh</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Smaranda</namePart>
        <namePart type="family">Muresan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Feldman</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Tuhin</namePart>
        <namePart type="family">Chakrabarty</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Emmy</namePart>
        <namePart type="family">Liu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Mexico City, Mexico (Hybrid)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We present the outcomes of the Multimodal Figurative Language Shared Task held at the 4th Workshop on Figurative Language Processing (FigLang 2024), co-located with NAACL 2024. The task utilized the V-FLUTE dataset, which comprises &lt;image, text&gt; pairs that use figurative language and includes detailed textual explanations for the entailment or contradiction relationship of each pair. The challenge for participants was to develop models capable of accurately identifying the visual entailment relationship in these multimodal instances and generating persuasive free-text explanations. The results showed that the participants’ models significantly outperformed the initial baselines in both automated and human evaluations. We also provide an overview of the submitted systems and analyze the evaluation results. In terms of F1 score, all participating systems outperformed the LLaVA-ZS baseline that we provided.</abstract>
<identifier type="citekey">kulkarni-etal-2024-report</identifier>
<identifier type="doi">10.18653/v1/2024.figlang-1.16</identifier>
<location>
<url>https://aclanthology.org/2024.figlang-1.16</url>
</location>
<part>
<date>2024-06</date>
<extent unit="page">
<start>115</start>
<end>119</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Report on the FigLang 2024 Shared Task on Multimodal Figurative Language
%A Kulkarni, Shreyas
%A Saakyan, Arkadiy
%A Chakrabarty, Tuhin
%A Muresan, Smaranda
%Y Ghosh, Debanjan
%Y Muresan, Smaranda
%Y Feldman, Anna
%Y Chakrabarty, Tuhin
%Y Liu, Emmy
%S Proceedings of the 4th Workshop on Figurative Language Processing (FigLang 2024)
%D 2024
%8 June
%I Association for Computational Linguistics
%C Mexico City, Mexico (Hybrid)
%F kulkarni-etal-2024-report
%X We present the outcomes of the Multimodal Figurative Language Shared Task held at the 4th Workshop on Figurative Language Processing (FigLang 2024), co-located with NAACL 2024. The task utilized the V-FLUTE dataset, which comprises <image, text> pairs that use figurative language and includes detailed textual explanations for the entailment or contradiction relationship of each pair. The challenge for participants was to develop models capable of accurately identifying the visual entailment relationship in these multimodal instances and generating persuasive free-text explanations. The results showed that the participants’ models significantly outperformed the initial baselines in both automated and human evaluations. We also provide an overview of the submitted systems and analyze the evaluation results. In terms of F1 score, all participating systems outperformed the LLaVA-ZS baseline that we provided.
%R 10.18653/v1/2024.figlang-1.16
%U https://aclanthology.org/2024.figlang-1.16
%U https://doi.org/10.18653/v1/2024.figlang-1.16
%P 115-119
Markdown (Informal)
[A Report on the FigLang 2024 Shared Task on Multimodal Figurative Language](https://aclanthology.org/2024.figlang-1.16) (Kulkarni et al., Fig-Lang-WS 2024)
ACL
Shreyas Kulkarni, Arkadiy Saakyan, Tuhin Chakrabarty, and Smaranda Muresan. 2024. A Report on the FigLang 2024 Shared Task on Multimodal Figurative Language. In Proceedings of the 4th Workshop on Figurative Language Processing (FigLang 2024), pages 115–119, Mexico City, Mexico (Hybrid). Association for Computational Linguistics.