@inproceedings{tilk-alumae-2017-low,
title = "Low-Resource Neural Headline Generation",
author = {Tilk, Ottokar and
Alum{\"a}e, Tanel},
editor = "Wang, Lu and
Cheung, Jackie Chi Kit and
Carenini, Giuseppe and
Liu, Fei",
booktitle = "Proceedings of the Workshop on New Frontiers in Summarization",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4503",
doi = "10.18653/v1/W17-4503",
pages = "20--26",
abstract = "Recent neural headline generation models have shown great results, but are generally trained on very large datasets. We focus our efforts on improving headline quality on smaller datasets by the means of pretraining. We propose new methods that enable pre-training all the parameters of the model and utilize all available text, resulting in improvements by up to 32.4{\%} relative in perplexity and 2.84 points in ROUGE.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="tilk-alumae-2017-low">
    <titleInfo>
      <title>Low-Resource Neural Headline Generation</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Ottokar</namePart>
      <namePart type="family">Tilk</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tanel</namePart>
      <namePart type="family">Alumäe</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2017-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Workshop on New Frontiers in Summarization</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lu</namePart>
        <namePart type="family">Wang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jackie</namePart>
        <namePart type="given">Chi</namePart>
        <namePart type="given">Kit</namePart>
        <namePart type="family">Cheung</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Giuseppe</namePart>
        <namePart type="family">Carenini</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Fei</namePart>
        <namePart type="family">Liu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Copenhagen, Denmark</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Recent neural headline generation models have shown great results, but are generally trained on very large datasets. We focus our efforts on improving headline quality on smaller datasets by means of pretraining. We propose new methods that enable pretraining all the parameters of the model and utilize all available text, resulting in improvements of up to 32.4% relative in perplexity and 2.84 points in ROUGE.</abstract>
<identifier type="citekey">tilk-alumae-2017-low</identifier>
<identifier type="doi">10.18653/v1/W17-4503</identifier>
<location>
<url>https://aclanthology.org/W17-4503</url>
</location>
<part>
<date>2017-09</date>
<extent unit="page">
<start>20</start>
<end>26</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Low-Resource Neural Headline Generation
%A Tilk, Ottokar
%A Alumäe, Tanel
%Y Wang, Lu
%Y Cheung, Jackie Chi Kit
%Y Carenini, Giuseppe
%Y Liu, Fei
%S Proceedings of the Workshop on New Frontiers in Summarization
%D 2017
%8 September
%I Association for Computational Linguistics
%C Copenhagen, Denmark
%F tilk-alumae-2017-low
%X Recent neural headline generation models have shown great results, but are generally trained on very large datasets. We focus our efforts on improving headline quality on smaller datasets by means of pretraining. We propose new methods that enable pretraining all the parameters of the model and utilize all available text, resulting in improvements of up to 32.4% relative in perplexity and 2.84 points in ROUGE.
%R 10.18653/v1/W17-4503
%U https://aclanthology.org/W17-4503
%U https://doi.org/10.18653/v1/W17-4503
%P 20-26
Markdown (Informal)
[Low-Resource Neural Headline Generation](https://aclanthology.org/W17-4503) (Tilk & Alumäe, 2017)
ACL
Ottokar Tilk and Tanel Alumäe. 2017. Low-Resource Neural Headline Generation. In Proceedings of the Workshop on New Frontiers in Summarization, pages 20–26, Copenhagen, Denmark. Association for Computational Linguistics.