@inproceedings{suzuki-etal-2021-building,
title = "Building a Video-and-Language Dataset with Human Actions for Multimodal Logical Inference",
author = "Suzuki, Riko and
Yanaka, Hitomi and
Mineshima, Koji and
Bekki, Daisuke",
editor = "Donatelli, Lucia and
Krishnaswamy, Nikhil and
Lai, Kenneth and
Pustejovsky, James",
booktitle = "Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR)",
month = jun,
year = "2021",
address = "Groningen, Netherlands (Online)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.mmsr-1.10",
pages = "102--107",
abstract = "This paper introduces a new video-and-language dataset with human actions for multimodal logical inference, which focuses on intentional and aspectual expressions that describe dynamic human actions. The dataset consists of 200 videos, 5,554 action labels, and 1,942 action triplets of the form (subject, predicate, object) that can be easily translated into logical semantic representations. The dataset is expected to be useful for evaluating multimodal inference systems between videos and semantically complicated sentences including negation and quantification.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="suzuki-etal-2021-building">
    <titleInfo>
      <title>Building a Video-and-Language Dataset with Human Actions for Multimodal Logical Inference</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Riko</namePart>
      <namePart type="family">Suzuki</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hitomi</namePart>
      <namePart type="family">Yanaka</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Koji</namePart>
      <namePart type="family">Mineshima</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Daisuke</namePart>
      <namePart type="family">Bekki</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-06</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lucia</namePart>
        <namePart type="family">Donatelli</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nikhil</namePart>
        <namePart type="family">Krishnaswamy</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Kenneth</namePart>
        <namePart type="family">Lai</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">James</namePart>
        <namePart type="family">Pustejovsky</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Groningen, Netherlands (Online)</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>This paper introduces a new video-and-language dataset with human actions for multimodal logical inference, which focuses on intentional and aspectual expressions that describe dynamic human actions. The dataset consists of 200 videos, 5,554 action labels, and 1,942 action triplets of the form (subject, predicate, object) that can be easily translated into logical semantic representations. The dataset is expected to be useful for evaluating multimodal inference systems between videos and semantically complicated sentences including negation and quantification.</abstract>
    <identifier type="citekey">suzuki-etal-2021-building</identifier>
    <location>
      <url>https://aclanthology.org/2021.mmsr-1.10</url>
    </location>
    <part>
      <date>2021-06</date>
      <extent unit="page">
        <start>102</start>
        <end>107</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Building a Video-and-Language Dataset with Human Actions for Multimodal Logical Inference
%A Suzuki, Riko
%A Yanaka, Hitomi
%A Mineshima, Koji
%A Bekki, Daisuke
%Y Donatelli, Lucia
%Y Krishnaswamy, Nikhil
%Y Lai, Kenneth
%Y Pustejovsky, James
%S Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR)
%D 2021
%8 June
%I Association for Computational Linguistics
%C Groningen, Netherlands (Online)
%F suzuki-etal-2021-building
%X This paper introduces a new video-and-language dataset with human actions for multimodal logical inference, which focuses on intentional and aspectual expressions that describe dynamic human actions. The dataset consists of 200 videos, 5,554 action labels, and 1,942 action triplets of the form (subject, predicate, object) that can be easily translated into logical semantic representations. The dataset is expected to be useful for evaluating multimodal inference systems between videos and semantically complicated sentences including negation and quantification.
%U https://aclanthology.org/2021.mmsr-1.10
%P 102-107
Markdown (Informal)
[Building a Video-and-Language Dataset with Human Actions for Multimodal Logical Inference](https://aclanthology.org/2021.mmsr-1.10) (Suzuki et al., MMSR 2021)
ACL
Riko Suzuki, Hitomi Yanaka, Koji Mineshima, and Daisuke Bekki. 2021. Building a Video-and-Language Dataset with Human Actions for Multimodal Logical Inference. In Proceedings of the 1st Workshop on Multimodal Semantic Representations (MMSR), pages 102–107, Groningen, Netherlands (Online). Association for Computational Linguistics.