@article{regneri-etal-2013-grounding,
    title = "Grounding Action Descriptions in Videos",
    author = "Regneri, Michaela and
      Rohrbach, Marcus and
      Wetzel, Dominikus and
      Thater, Stefan and
      Schiele, Bernt and
      Pinkal, Manfred",
    editor = "Lin, Dekang and
      Collins, Michael",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "1",
    year = "2013",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/Q13-1003",
    doi = "10.1162/tacl_a_00207",
    pages = "25--36",
    abstract = "Recent work has shown that the integration of visual information into text-based models can substantially improve model predictions, but so far only visual information extracted from static images has been used. In this paper, we consider the problem of grounding sentences describing actions in visual information extracted from videos. We present a general purpose corpus that aligns high quality videos with multiple natural language descriptions of the actions portrayed in the videos, together with an annotation of how similar the action descriptions are to each other. Experimental results demonstrate that a text-based model of similarity between actions improves substantially when combined with visual information from videos depicting the described actions.",
}
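A minimal LaTeX usage sketch for the BibTeX entry above. The file name anthology.bib and the plainnat bibliography style are illustrative assumptions; only the citation key regneri-etal-2013-grounding comes from the entry itself.

% Minimal sketch, not an official template: assumes the entry above is saved as anthology.bib
\documentclass{article}
\usepackage{natbib}          % natbib provides \citep and \citet
\begin{document}
Action descriptions can be grounded in video \citep{regneri-etal-2013-grounding}.
\bibliographystyle{plainnat} % example style; substitute the style required by your venue
\bibliography{anthology}     % assumed file name containing the BibTeX entry above
\end{document}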
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="regneri-etal-2013-grounding">
    <titleInfo>
      <title>Grounding Action Descriptions in Videos</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Michaela</namePart>
      <namePart type="family">Regneri</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Marcus</namePart>
      <namePart type="family">Rohrbach</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dominikus</namePart>
      <namePart type="family">Wetzel</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Stefan</namePart>
      <namePart type="family">Thater</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Bernt</namePart>
      <namePart type="family">Schiele</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Manfred</namePart>
      <namePart type="family">Pinkal</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2013</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <genre authority="bibutilsgt">journal article</genre>
    <relatedItem type="host">
      <titleInfo>
        <title>Transactions of the Association for Computational Linguistics</title>
      </titleInfo>
      <originInfo>
        <issuance>continuing</issuance>
        <publisher>MIT Press</publisher>
        <place>
          <placeTerm type="text">Cambridge, MA</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">periodical</genre>
      <genre authority="bibutilsgt">academic journal</genre>
    </relatedItem>
    <abstract>Recent work has shown that the integration of visual information into text-based models can substantially improve model predictions, but so far only visual information extracted from static images has been used. In this paper, we consider the problem of grounding sentences describing actions in visual information extracted from videos. We present a general purpose corpus that aligns high quality videos with multiple natural language descriptions of the actions portrayed in the videos, together with an annotation of how similar the action descriptions are to each other. Experimental results demonstrate that a text-based model of similarity between actions improves substantially when combined with visual information from videos depicting the described actions.</abstract>
    <identifier type="citekey">regneri-etal-2013-grounding</identifier>
    <identifier type="doi">10.1162/tacl_a_00207</identifier>
    <location>
      <url>https://aclanthology.org/Q13-1003</url>
    </location>
    <part>
      <date>2013</date>
      <detail type="volume"><number>1</number></detail>
      <extent unit="page">
        <start>25</start>
        <end>36</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Journal Article
%T Grounding Action Descriptions in Videos
%A Regneri, Michaela
%A Rohrbach, Marcus
%A Wetzel, Dominikus
%A Thater, Stefan
%A Schiele, Bernt
%A Pinkal, Manfred
%J Transactions of the Association for Computational Linguistics
%D 2013
%V 1
%I MIT Press
%C Cambridge, MA
%F regneri-etal-2013-grounding
%X Recent work has shown that the integration of visual information into text-based models can substantially improve model predictions, but so far only visual information extracted from static images has been used. In this paper, we consider the problem of grounding sentences describing actions in visual information extracted from videos. We present a general purpose corpus that aligns high quality videos with multiple natural language descriptions of the actions portrayed in the videos, together with an annotation of how similar the action descriptions are to each other. Experimental results demonstrate that a text-based model of similarity between actions improves substantially when combined with visual information from videos depicting the described actions.
%R 10.1162/tacl_a_00207
%U https://aclanthology.org/Q13-1003
%U https://doi.org/10.1162/tacl_a_00207
%P 25-36
Markdown (Informal)
[Grounding Action Descriptions in Videos](https://aclanthology.org/Q13-1003) (Regneri et al., TACL 2013)
ACL
- Michaela Regneri, Marcus Rohrbach, Dominikus Wetzel, Stefan Thater, Bernt Schiele, and Manfred Pinkal. 2013. Grounding Action Descriptions in Videos. Transactions of the Association for Computational Linguistics, 1:25–36.