@inproceedings{li-EtAl:2017:EMNLP20173,
  author    = {Li, Piji and Lam, Wai and Bing, Lidong and Guo, Weiwei and Li, Hang},
  title     = {Cascaded Attention based Unsupervised Information Distillation for Compressive Summarization},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {2081--2090},
  abstract  = {When people recall and digest what they have read for writing summaries, the important content is more likely to attract their attention. Inspired by this observation, we propose a cascaded attention based unsupervised model to estimate the salience information from the text for compressive multi-document summarization. The attention weights are learned automatically by an unsupervised data reconstruction framework which can capture the sentence salience. By adding sparsity constraints on the number of output vectors, we can generate condensed information which can be treated as word salience. Fine-grained and coarse-grained sentence compression strategies are incorporated to produce compressive summaries. Experiments on some benchmark data sets show that our framework achieves better results than the state-of-the-art methods.},
  url       = {https://www.aclweb.org/anthology/D17-1221},
}

