@inproceedings{xu-EtAl:2017:MultiLing2017,
  author    = {Xu, Ying and Lau, Jey Han and Baldwin, Timothy and Cohn, Trevor},
  title     = {Decoupling Encoder and Decoder Networks for Abstractive Document Summarization},
  booktitle = {Proceedings of the MultiLing 2017 Workshop on Summarization and Summary Evaluation Across Source Types and Genres},
  month     = apr,
  year      = {2017},
  address   = {Valencia, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {7--11},
  abstract  = {Abstractive document summarization seeks to automatically generate a summary
	for a document, based on some abstract ``understanding'' of the original
	document. State-of-the-art techniques traditionally use
	attentive encoder--decoder architectures.  However, due to the large number of
	parameters in these models, they require large training datasets and long
	training times. In this paper, we propose decoupling the encoder and decoder
	networks, and training them separately.  We encode documents using an
	unsupervised document encoder, and then feed the document vector to a recurrent
	neural network decoder. With this decoupled architecture, we decrease the
	number of parameters in the decoder substantially, and shorten its training
	time.  Experiments show that the decoupled model achieves comparable
	performance with state-of-the-art models for in-domain documents, but less well
	for out-of-domain documents.},
  doi       = {10.18653/v1/W17-1002},
  url       = {http://www.aclweb.org/anthology/W17-1002},
}

