@inproceedings{iokawa-yanaka-2024-visual,
title = "Visual-Textual Entailment with Quantities Using Model Checking and Knowledge Injection",
author = "Iokawa, Nobuyuki and
Yanaka, Hitomi",
editor = "Calzolari, Nicoletta and
Kan, Min-Yen and
Hoste, Veronique and
Lenci, Alessandro and
Sakti, Sakriani and
Xue, Nianwen",
booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://aclanthology.org/2024.lrec-main.1512",
pages = "17397--17408",
abstract = "In recent years, there has been great interest in multimodal inference. We concentrate on visual-textual entailment (VTE), a critical task in multimodal inference. VTE is the task of determining entailment relations between an image and a sentence. Several deep learning-based approaches have been proposed for VTE, but current approaches struggle with accurately handling quantities. On the other hand, one promising approach, one based on logical inference that can successfully deal with large quantities, has also been proposed. However, that approach uses automated theorem provers, increasing the computational cost for problems involving many entities. In addition, that approach cannot deal well with lexical differences between the semantic representations of images and sentences. In this paper, we present a logic-based VTE system that overcomes these drawbacks, using model checking for inference to increase efficiency and knowledge injection to perform more robust inference. We create a VTE dataset containing quantities and negation to assess how well VTE systems understand such phenomena. Using this dataset, we demonstrate that our system solves VTE tasks with quantities and negation more robustly than previous approaches.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="iokawa-yanaka-2024-visual">
    <titleInfo>
      <title>Visual-Textual Entailment with Quantities Using Model Checking and Knowledge Injection</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Nobuyuki</namePart>
      <namePart type="family">Iokawa</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hitomi</namePart>
      <namePart type="family">Yanaka</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Nicoletta</namePart>
        <namePart type="family">Calzolari</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Min-Yen</namePart>
        <namePart type="family">Kan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Veronique</namePart>
        <namePart type="family">Hoste</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alessandro</namePart>
        <namePart type="family">Lenci</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sakriani</namePart>
        <namePart type="family">Sakti</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nianwen</namePart>
        <namePart type="family">Xue</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>ELRA and ICCL</publisher>
        <place>
          <placeTerm type="text">Torino, Italia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In recent years, there has been great interest in multimodal inference. We concentrate on visual-textual entailment (VTE), a critical task in multimodal inference: determining the entailment relation between an image and a sentence. Several deep learning-based approaches have been proposed for VTE, but current approaches struggle to handle quantities accurately. On the other hand, a promising approach based on logical inference, which can successfully deal with large quantities, has also been proposed. However, that approach uses automated theorem provers, which increases the computational cost for problems involving many entities. In addition, it cannot deal well with lexical differences between the semantic representations of images and sentences. In this paper, we present a logic-based VTE system that overcomes these drawbacks, using model checking to make inference more efficient and knowledge injection to make it more robust. We create a VTE dataset containing quantities and negation to assess how well VTE systems understand such phenomena. Using this dataset, we demonstrate that our system solves VTE tasks with quantities and negation more robustly than previous approaches.</abstract>
<identifier type="citekey">iokawa-yanaka-2024-visual</identifier>
<location>
<url>https://aclanthology.org/2024.lrec-main.1512</url>
</location>
<part>
<date>2024-05</date>
<extent unit="page">
<start>17397</start>
<end>17408</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Visual-Textual Entailment with Quantities Using Model Checking and Knowledge Injection
%A Iokawa, Nobuyuki
%A Yanaka, Hitomi
%Y Calzolari, Nicoletta
%Y Kan, Min-Yen
%Y Hoste, Veronique
%Y Lenci, Alessandro
%Y Sakti, Sakriani
%Y Xue, Nianwen
%S Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F iokawa-yanaka-2024-visual
%X In recent years, there has been great interest in multimodal inference. We concentrate on visual-textual entailment (VTE), a critical task in multimodal inference: determining the entailment relation between an image and a sentence. Several deep learning-based approaches have been proposed for VTE, but current approaches struggle to handle quantities accurately. On the other hand, a promising approach based on logical inference, which can successfully deal with large quantities, has also been proposed. However, that approach uses automated theorem provers, which increases the computational cost for problems involving many entities. In addition, it cannot deal well with lexical differences between the semantic representations of images and sentences. In this paper, we present a logic-based VTE system that overcomes these drawbacks, using model checking to make inference more efficient and knowledge injection to make it more robust. We create a VTE dataset containing quantities and negation to assess how well VTE systems understand such phenomena. Using this dataset, we demonstrate that our system solves VTE tasks with quantities and negation more robustly than previous approaches.
%U https://aclanthology.org/2024.lrec-main.1512
%P 17397-17408
Markdown (Informal)
[Visual-Textual Entailment with Quantities Using Model Checking and Knowledge Injection](https://aclanthology.org/2024.lrec-main.1512) (Iokawa & Yanaka, LREC-COLING 2024)
ACL
Nobuyuki Iokawa and Hitomi Yanaka. 2024. Visual-Textual Entailment with Quantities Using Model Checking and Knowledge Injection. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 17397–17408, Torino, Italia. ELRA and ICCL.
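
The abstract describes replacing automated theorem proving with model checking over a finite scene representation, plus injecting lexical knowledge to bridge vocabulary gaps between image and sentence representations. As an informal illustration only (not the authors' system: `SceneModel`, `LEXICON`, and the quantifier helpers below are invented for this sketch), the following Python fragment shows how counting quantifiers and negation can be checked by direct enumeration over a finite model, with a small lexical mapping standing in for knowledge injection.

```python
# Minimal sketch: model checking a quantified claim against a toy scene model.
# All names here are hypothetical; this is not the paper's implementation.
from dataclasses import dataclass


@dataclass
class SceneModel:
    """A toy finite model extracted from an image: entities plus unary facts."""
    entities: set
    facts: set  # set of (predicate, entity) pairs

    def holds(self, pred, ent):
        return (pred, ent) in self.facts


# Hypothetical lexical knowledge to "inject": sentence-side words mapped
# to image-side predicates (e.g., "puppy" is taken to entail "dog").
LEXICON = {"puppy": "dog", "canine": "dog"}


def normalize(pred):
    return LEXICON.get(pred, pred)


def at_least(n, pred, model):
    """Counting quantifier 'at least n entities satisfy pred', checked by
    enumerating the finite model rather than searching for a proof."""
    p = normalize(pred)
    return sum(model.holds(p, e) for e in model.entities) >= n


def no(pred, model):
    """Negative quantifier 'no entity satisfies pred'."""
    p = normalize(pred)
    return all(not model.holds(p, e) for e in model.entities)


if __name__ == "__main__":
    # Toy "image": three dogs and one cat.
    scene = SceneModel(
        entities={"e1", "e2", "e3", "e4"},
        facts={("dog", "e1"), ("dog", "e2"), ("dog", "e3"), ("cat", "e4")},
    )
    print(at_least(2, "puppy", scene))  # True: LEXICON maps "puppy" to "dog"
    print(no("horse", scene))           # True: the negated claim holds here
```

The point of the sketch is the trade-off named in the abstract: evaluating a formula over a finite model scales with the number of entities to enumerate, avoiding the proof search that makes theorem proving costly when many entities are involved.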