@InProceedings{du-shao-cardie:2017:Long,
  author    = {Du, Xinya and Shao, Junru and Cardie, Claire},
  title     = {Learning to Ask: Neural Question Generation for Reading Comprehension},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = {July},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {1342--1352},
  abstract  = {We study automatic question generation for sentences from text passages in
               reading comprehension. We introduce an attention-based sequence learning model
               for the task and investigate the effect of encoding sentence- vs.
               paragraph-level information. In contrast to all previous work, our model does
               not rely on hand-crafted rules or a sophisticated NLP pipeline; it is instead
               trainable end-to-end via sequence-to-sequence learning. Automatic evaluation
               results show that our system significantly outperforms the state-of-the-art
               rule-based system. In human evaluations, questions generated by our system are
               also rated as being more natural (i.e., grammaticality, fluency) and as more
               difficult to answer (in terms of syntactic and lexical divergence from the
               original text and reasoning needed to answer).},
  url       = {http://aclweb.org/anthology/P17-1123}
}

