@inproceedings{hsu-EtAl:2018:Long,
  author    = {Hsu, Wan-Ting  and  Lin, Chieh-Kai  and  Lee, Ming-Ying  and  Min, Kerui  and  Tang, Jing  and  Sun, Min},
  title     = {A Unified Model for Extractive and Abstractive Summarization using Inconsistency Loss},
  booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  pages     = {132--141},
  abstract  = {We propose a unified model combining the strength of extractive and abstractive summarization. On the one hand, a simple extractive model can obtain sentence-level attention with high ROUGE scores but less readable. On the other hand, a more complicated abstractive model can obtain word-level dynamic attention to generate a more readable paragraph. In our model, sentence-level attention is used to modulate the word-level attention such that words in less attended sentences are less likely to be generated. Moreover, a novel inconsistency loss function is introduced to penalize the inconsistency between two levels of attentions. By end-to-end training our model with the inconsistency loss and original losses of extractive and abstractive models, we achieve state-of-the-art ROUGE scores while being the most informative and readable summarization on the CNN/Daily Mail dataset in a solid human evaluation.},
  url       = {http://www.aclweb.org/anthology/P18-1013},
}

