@inproceedings{li-EtAl:2017:EMNLP20174,
  author    = {Li, Piji and Lam, Wai and Bing, Lidong and Wang, Zihao},
  title     = {Deep Recurrent Generative Decoder for Abstractive Text Summarization},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {2091--2100},
  doi       = {10.18653/v1/D17-1222},
  url       = {https://aclanthology.org/D17-1222},
  abstract  = {We propose a new framework for abstractive text summarization based on a
    sequence-to-sequence oriented encoder-decoder model equipped with a deep
    recurrent generative decoder (DRGN). Latent structure information implied
    in the target summaries is learned based on a recurrent latent random model
    for improving the summarization quality. Neural variational inference is
    employed to address the intractable posterior inference for the recurrent
    latent variables. Abstractive summaries are generated based on both the
    generative latent variables and the discriminative deterministic states.
    Extensive experiments on some benchmark datasets in different languages
    show that DRGN achieves improvements over the state-of-the-art methods.},
}

