@inproceedings{tilk-alumae:2017:FrontiersSummarization,
  author    = {Tilk, Ottokar and Alum{\"a}e, Tanel},
  title     = {Low-Resource Neural Headline Generation},
  booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {20--26},
  abstract  = {Recent neural headline generation models
	have shown great results, but are generally
	trained on very large datasets. We focus
	our efforts on improving headline quality
	on smaller datasets by the means of pretraining.
	We propose new methods that
	enable pre-training all the parameters of
	the model and utilize all available text, resulting
	in improvements by up to 32.4\%
	relative in perplexity and 2.84 points in
	ROUGE.},
  url       = {http://www.aclweb.org/anthology/W17-4503},
}

