@inproceedings{tan-wan-xiao:2017:Long,
  author    = {Tan, Jiwei and Wan, Xiaojun and Xiao, Jianguo},
  title     = {Abstractive Document Summarization with a Graph-Based Attentional Neural Model},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {1171--1181},
  abstract  = {Abstractive summarization is the ultimate goal of document summarization
	research, but previously it is less investigated due to the immaturity of text
	generation techniques. Recently impressive progress has been made to
	abstractive sentence summarization using neural models. Unfortunately, attempts
	on abstractive document summarization are still in a primitive stage, and the
	evaluation results are worse than extractive methods on benchmark datasets. In
	this paper, we review the difficulties of neural abstractive document
	summarization, and propose a novel graph-based attention mechanism in the
	sequence-to-sequence framework. The intuition is to address the saliency factor
	of summarization, which has been overlooked by prior works. Experimental
	results demonstrate our model is able to achieve considerable improvement over
	previous neural abstractive models. The data-driven neural abstractive method
	is also competitive with state-of-the-art extractive methods.},
  doi       = {10.18653/v1/P17-1108},
  url       = {http://aclweb.org/anthology/P17-1108}
}

