@inproceedings{xie-etal-2021-exploring,
title = "Exploring Story Generation with Multi-task Objectives in Variational Autoencoders",
author = "Xie, Zhuohan and
Lau, Jey Han and
Cohn, Trevor",
editor = "Rahimi, Afshin and
Lane, William and
Zuccon, Guido",
booktitle = "Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association",
month = dec,
year = "2021",
address = "Online",
publisher = "Australasian Language Technology Association",
url = "https://aclanthology.org/2021.alta-1.10",
pages = "97--106",
abstract = "GPT-2 has been frequently adapted in story generation models as it provides powerful generative capability. However, it still fails to generate consistent stories and lacks diversity. Current story generation models leverage additional information such as plots or commonsense into GPT-2 to guide the generation process. These approaches focus on improving generation quality of stories while our work look at both quality and diversity. We explore combining BERT and GPT-2 to build a variational autoencoder (VAE), and extend it by adding additional objectives to learn global features such as story topic and discourse relations. Our evaluations show our enhanced VAE can provide better quality and diversity trade off, generate less repetitive story content and learn a more informative latent variable.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="xie-etal-2021-exploring">
    <titleInfo>
      <title>Exploring Story Generation with Multi-task Objectives in Variational Autoencoders</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Zhuohan</namePart>
      <namePart type="family">Xie</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Jey</namePart>
      <namePart type="given">Han</namePart>
      <namePart type="family">Lau</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Trevor</namePart>
      <namePart type="family">Cohn</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Afshin</namePart>
        <namePart type="family">Rahimi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">William</namePart>
        <namePart type="family">Lane</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Guido</namePart>
        <namePart type="family">Zuccon</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Australasian Language Technology Association</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>GPT-2 has frequently been adapted for story generation as it provides powerful generative capability. However, it still fails to generate consistent stories and lacks diversity. Current story generation models incorporate additional information, such as plots or commonsense, into GPT-2 to guide the generation process. These approaches focus on improving the generation quality of stories, while our work looks at both quality and diversity. We explore combining BERT and GPT-2 to build a variational autoencoder (VAE), and extend it by adding additional objectives to learn global features such as story topic and discourse relations. Our evaluations show that our enhanced VAE can provide a better quality-diversity trade-off, generate less repetitive story content, and learn a more informative latent variable.</abstract>
    <identifier type="citekey">xie-etal-2021-exploring</identifier>
    <location>
      <url>https://aclanthology.org/2021.alta-1.10</url>
    </location>
    <part>
      <date>2021-12</date>
      <extent unit="page">
        <start>97</start>
        <end>106</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Exploring Story Generation with Multi-task Objectives in Variational Autoencoders
%A Xie, Zhuohan
%A Lau, Jey Han
%A Cohn, Trevor
%Y Rahimi, Afshin
%Y Lane, William
%Y Zuccon, Guido
%S Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association
%D 2021
%8 December
%I Australasian Language Technology Association
%C Online
%F xie-etal-2021-exploring
%X GPT-2 has frequently been adapted for story generation as it provides powerful generative capability. However, it still fails to generate consistent stories and lacks diversity. Current story generation models incorporate additional information, such as plots or commonsense, into GPT-2 to guide the generation process. These approaches focus on improving the generation quality of stories, while our work looks at both quality and diversity. We explore combining BERT and GPT-2 to build a variational autoencoder (VAE), and extend it by adding additional objectives to learn global features such as story topic and discourse relations. Our evaluations show that our enhanced VAE can provide a better quality-diversity trade-off, generate less repetitive story content, and learn a more informative latent variable.
%U https://aclanthology.org/2021.alta-1.10
%P 97-106
Markdown (Informal)
[Exploring Story Generation with Multi-task Objectives in Variational Autoencoders](https://aclanthology.org/2021.alta-1.10) (Xie et al., ALTA 2021)
ACL
Zhuohan Xie, Jey Han Lau, and Trevor Cohn. 2021. Exploring Story Generation with Multi-task Objectives in Variational Autoencoders. In Proceedings of the 19th Annual Workshop of the Australasian Language Technology Association, pages 97–106, Online. Australasian Language Technology Association.
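
The abstract describes an encoder-decoder setup: BERT encodes a story into a latent variable, GPT-2 decodes conditioned on it, and auxiliary objectives (e.g., topic prediction) push global information into the latent space. Below is a minimal illustrative sketch of such an architecture in PyTorch with Hugging Face transformers. It is not the authors' implementation: the latent size, [CLS] pooling, prepended-embedding conditioning, the topic-prediction head, and the equal loss weighting are all assumptions made for illustration.

```python
# Minimal sketch (not the paper's code): a VAE with a BERT encoder, a GPT-2
# decoder, and one auxiliary "global feature" objective (topic prediction).
# latent_dim, num_topics, [CLS] pooling, and equal loss weights are all
# illustrative assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel, GPT2LMHeadModel

class StoryVAE(nn.Module):
    def __init__(self, latent_dim=64, num_topics=10):
        super().__init__()
        self.encoder = BertModel.from_pretrained("bert-base-uncased")
        self.decoder = GPT2LMHeadModel.from_pretrained("gpt2")
        hidden = self.encoder.config.hidden_size             # 768 for bert-base
        self.to_mu = nn.Linear(hidden, latent_dim)           # posterior mean
        self.to_logvar = nn.Linear(hidden, latent_dim)       # posterior log-variance
        # Map the latent into GPT-2's embedding space so it can be prepended
        # to the decoder input as a conditioning vector.
        self.latent_to_emb = nn.Linear(latent_dim, self.decoder.config.n_embd)
        self.topic_head = nn.Linear(latent_dim, num_topics)  # auxiliary objective

    def forward(self, enc_ids, enc_mask, dec_ids, topic_labels):
        # Encode the story and pool with the [CLS] vector.
        cls = self.encoder(input_ids=enc_ids,
                           attention_mask=enc_mask).last_hidden_state[:, 0]
        mu, logvar = self.to_mu(cls), self.to_logvar(cls)
        z = mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)  # reparameterization

        # Condition GPT-2 on z by prepending it as an extra position.
        z_emb = self.latent_to_emb(z).unsqueeze(1)               # (B, 1, n_embd)
        tok_emb = self.decoder.transformer.wte(dec_ids)          # (B, L, n_embd)
        logits = self.decoder(
            inputs_embeds=torch.cat([z_emb, tok_emb], dim=1)).logits

        # Position 0 (the latent) predicts token 0 and position i predicts
        # token i, so logits[:, :-1] aligns with the full token sequence.
        rec = F.cross_entropy(logits[:, :-1].reshape(-1, logits.size(-1)),
                              dec_ids.reshape(-1))
        kl = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
        aux = F.cross_entropy(self.topic_head(z), topic_labels)
        return rec + kl + aux  # relative weighting is a tuning choice
```

At generation time one would sample z from the standard-normal prior, map it through latent_to_emb, and decode autoregressively with GPT-2; varying z is what yields diverse stories, while the auxiliary objectives encourage z to carry global features such as topic.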