@inproceedings{yasunaga-EtAl:2017:CoNLL,
  author    = {Yasunaga, Michihiro and Zhang, Rui and Meelu, Kshitijh and Pareek, Ayush and Srinivasan, Krishnan and Radev, Dragomir},
  title     = {Graph-based Neural Multi-Document Summarization},
  booktitle = {Proceedings of the 21st Conference on Computational Natural Language Learning ({CoNLL} 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {452--462},
  doi       = {10.18653/v1/K17-1045},
  url       = {https://aclanthology.org/K17-1045},
  abstract  = {We propose a neural multi-document summarization system that incorporates
    sentence relation graphs.
    We employ a Graph Convolutional Network (GCN) on the relation graphs, with
    sentence embeddings obtained from Recurrent Neural Networks as input node
    features.
    Through multiple layer-wise propagation, the GCN generates high-level hidden
    sentence features for salience estimation.
    We then use a greedy heuristic to extract salient sentences that avoid
    redundancy.
    In our experiments on DUC 2004, we consider three types of sentence relation
    graphs and demonstrate the advantage of combining sentence relations in graphs
    with the representation power of deep neural networks.
    Our model improves upon other traditional graph-based extractive approaches and
    the vanilla GRU sequence model with no graph, and it achieves competitive
    results against other state-of-the-art multi-document summarization systems.},
}

