@InProceedings{pasunuru-guo-bansal:2017:FrontiersSummarization,
  author    = {Pasunuru, Ramakanth  and  Guo, Han  and  Bansal, Mohit},
  title     = {Towards Improving Abstractive Summarization via Entailment Generation},
  booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {27--32},
  abstract  = {Abstractive summarization, the task of rewriting and compressing a document
    into a short summary, has achieved considerable success with neural
    sequence-to-sequence models. However, these models can still benefit from
    stronger natural language inference skills, since a correct summary is
    logically entailed by the input document, i.e., it should not contain any
    contradictory or unrelated information. We incorporate such knowledge into an
    abstractive summarization model via multi-task learning, where we share its
    decoder parameters with those of an entailment generation model. We achieve
    promising initial improvements based on multiple metrics and datasets
    (including a test-only setting). The domain mismatch between the entailment
    (captions) and summarization (news) datasets suggests that the model is
    learning some domain-agnostic inference skills.},
  doi       = {10.18653/v1/W17-4504},
  url       = {https://aclanthology.org/W17-4504}
}

