@inproceedings{zhai-etal-2019-hybrid,
  title     = {A Hybrid Model for Globally Coherent Story Generation},
  author    = {Zhai, Fangzhou and
               Demberg, Vera and
               Shkadzko, Pavel and
               Shi, Wei and
               Sayeed, Asad},
  editor    = {Ferraro, Francis and
               Huang, Ting-Hao {`}Kenneth{'} and
               Lukin, Stephanie M. and
               Mitchell, Margaret},
  booktitle = {Proceedings of the Second Workshop on Storytelling},
  month     = aug,
  year      = {2019},
  address   = {Florence, Italy},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W19-3404},
  doi       = {10.18653/v1/W19-3404},
  pages     = {34--45},
  abstract  = {Automatically generating globally coherent stories is a challenging problem. Neural text generation models have been shown to perform well at generating fluent sentences from data, but they usually fail to keep track of the overall coherence of the story after a couple of sentences. Existing work that incorporates a text planning module succeeded in generating recipes and dialogues, but appears quite data-demanding. We propose a novel story generation approach that generates globally coherent stories from a fairly small corpus. The model exploits a symbolic text planning module to produce text plans, thus reducing the demand of data; a neural surface realization module then generates fluent text conditioned on the text plan. Human evaluation showed that our model outperforms various baselines by a wide margin and generates stories which are fluent as well as globally coherent.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhai-etal-2019-hybrid">
<titleInfo>
<title>A Hybrid Model for Globally Coherent Story Generation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Fangzhou</namePart>
<namePart type="family">Zhai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vera</namePart>
<namePart type="family">Demberg</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pavel</namePart>
<namePart type="family">Shkadzko</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei</namePart>
<namePart type="family">Shi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Asad</namePart>
<namePart type="family">Sayeed</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Storytelling</title>
</titleInfo>
<name type="personal">
<namePart type="given">Francis</namePart>
<namePart type="family">Ferraro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ting-Hao</namePart>
<namePart type="given">‘Kenneth’</namePart>
<namePart type="family">Huang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stephanie</namePart>
<namePart type="given">M.</namePart>
<namePart type="family">Lukin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Margaret</namePart>
<namePart type="family">Mitchell</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Florence, Italy</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Automatically generating globally coherent stories is a challenging problem. Neural text generation models have been shown to perform well at generating fluent sentences from data, but they usually fail to keep track of the overall coherence of the story after a couple of sentences. Existing work that incorporates a text planning module succeeded in generating recipes and dialogues, but appears quite data-demanding. We propose a novel story generation approach that generates globally coherent stories from a fairly small corpus. The model exploits a symbolic text planning module to produce text plans, thus reducing the demand of data; a neural surface realization module then generates fluent text conditioned on the text plan. Human evaluation showed that our model outperforms various baselines by a wide margin and generates stories which are fluent as well as globally coherent.</abstract>
<identifier type="citekey">zhai-etal-2019-hybrid</identifier>
<identifier type="doi">10.18653/v1/W19-3404</identifier>
<location>
<url>https://aclanthology.org/W19-3404</url>
</location>
<part>
<date>2019-08</date>
<extent unit="page">
<start>34</start>
<end>45</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Hybrid Model for Globally Coherent Story Generation
%A Zhai, Fangzhou
%A Demberg, Vera
%A Shkadzko, Pavel
%A Shi, Wei
%A Sayeed, Asad
%Y Ferraro, Francis
%Y Huang, Ting-Hao ‘Kenneth’
%Y Lukin, Stephanie M.
%Y Mitchell, Margaret
%S Proceedings of the Second Workshop on Storytelling
%D 2019
%8 August
%I Association for Computational Linguistics
%C Florence, Italy
%F zhai-etal-2019-hybrid
%X Automatically generating globally coherent stories is a challenging problem. Neural text generation models have been shown to perform well at generating fluent sentences from data, but they usually fail to keep track of the overall coherence of the story after a couple of sentences. Existing work that incorporates a text planning module succeeded in generating recipes and dialogues, but appears quite data-demanding. We propose a novel story generation approach that generates globally coherent stories from a fairly small corpus. The model exploits a symbolic text planning module to produce text plans, thus reducing the demand of data; a neural surface realization module then generates fluent text conditioned on the text plan. Human evaluation showed that our model outperforms various baselines by a wide margin and generates stories which are fluent as well as globally coherent.
%R 10.18653/v1/W19-3404
%U https://aclanthology.org/W19-3404
%U https://doi.org/10.18653/v1/W19-3404
%P 34-45
Markdown (Informal)
[A Hybrid Model for Globally Coherent Story Generation](https://aclanthology.org/W19-3404) (Zhai et al., Story-NLP 2019)
ACL