@inproceedings{tozzo-etal-2018-neural,
  title     = {Neural Event Extraction from Movies Description},
  author    = {Tozzo, Alex and Jovanovi{\'c}, Dejan and Amer, Mohamed},
  editor    = {Mitchell, Margaret and Huang, Ting-Hao {`}Kenneth{'} and Ferraro, Francis and Misra, Ishan},
  booktitle = {Proceedings of the First Workshop on Storytelling},
  month     = jun,
  year      = {2018},
  address   = {New Orleans, Louisiana},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-1507},
  doi       = {10.18653/v1/W18-1507},
  pages     = {60--66},
  abstract  = {We present a novel approach for event extraction and abstraction from movie descriptions. Our event frame consists of {``}who{''}, {``}did what{''} {``}to whom{''}, {``}where{''}, and {``}when{''}. We formulate our problem using a recurrent neural network, enhanced with structural features extracted from syntactic parser, and trained using curriculum learning by progressively increasing the difficulty of the sentences. Our model serves as an intermediate step towards question answering systems, visual storytelling, and story completion tasks. We evaluate our approach on MovieQA dataset.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tozzo-etal-2018-neural">
<titleInfo>
<title>Neural Event Extraction from Movies Description</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alex</namePart>
<namePart type="family">Tozzo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dejan</namePart>
<namePart type="family">Jovanović</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohamed</namePart>
<namePart type="family">Amer</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-06</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Storytelling</title>
</titleInfo>
<name type="personal">
<namePart type="given">Margaret</namePart>
<namePart type="family">Mitchell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ting-Hao</namePart>
<namePart type="given">‘Kenneth’</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Francis</namePart>
<namePart type="family">Ferraro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ishan</namePart>
<namePart type="family">Misra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">New Orleans, Louisiana</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We present a novel approach for event extraction and abstraction from movie descriptions. Our event frame consists of “who”, “did what” “to whom”, “where”, and “when”. We formulate our problem using a recurrent neural network, enhanced with structural features extracted from syntactic parser, and trained using curriculum learning by progressively increasing the difficulty of the sentences. Our model serves as an intermediate step towards question answering systems, visual storytelling, and story completion tasks. We evaluate our approach on MovieQA dataset.</abstract>
<identifier type="citekey">tozzo-etal-2018-neural</identifier>
<identifier type="doi">10.18653/v1/W18-1507</identifier>
<location>
<url>https://aclanthology.org/W18-1507</url>
</location>
<part>
<date>2018-06</date>
<extent unit="page">
<start>60</start>
<end>66</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Neural Event Extraction from Movies Description
%A Tozzo, Alex
%A Jovanović, Dejan
%A Amer, Mohamed
%Y Mitchell, Margaret
%Y Huang, Ting-Hao ‘Kenneth’
%Y Ferraro, Francis
%Y Misra, Ishan
%S Proceedings of the First Workshop on Storytelling
%D 2018
%8 June
%I Association for Computational Linguistics
%C New Orleans, Louisiana
%F tozzo-etal-2018-neural
%X We present a novel approach for event extraction and abstraction from movie descriptions. Our event frame consists of “who”, “did what” “to whom”, “where”, and “when”. We formulate our problem using a recurrent neural network, enhanced with structural features extracted from syntactic parser, and trained using curriculum learning by progressively increasing the difficulty of the sentences. Our model serves as an intermediate step towards question answering systems, visual storytelling, and story completion tasks. We evaluate our approach on MovieQA dataset.
%R 10.18653/v1/W18-1507
%U https://aclanthology.org/W18-1507
%U https://doi.org/10.18653/v1/W18-1507
%P 60-66
Markdown (Informal)
[Neural Event Extraction from Movies Description](https://aclanthology.org/W18-1507) (Tozzo et al., Story-NLP 2018)
ACL
- Alex Tozzo, Dejan Jovanović, and Mohamed Amer. 2018. Neural Event Extraction from Movies Description. In Proceedings of the First Workshop on Storytelling, pages 60–66, New Orleans, Louisiana. Association for Computational Linguistics.