@inproceedings{yuan-EtAl:2017:RepL4NLP,
  author    = {Yuan, Xingdi and Wang, Tong and Gulcehre, Caglar and Sordoni, Alessandro and Bachman, Philip and Zhang, Saizheng and Subramanian, Sandeep and Trischler, Adam},
  title     = {Machine Comprehension by Text-to-Text Neural Question Generation},
  booktitle = {Proceedings of the 2nd Workshop on Representation Learning for NLP},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {15--25},
  doi       = {10.18653/v1/W17-2603},
  url       = {https://aclanthology.org/W17-2603},
  abstract  = {We propose a recurrent neural model that generates natural-language questions
	from documents, conditioned on answers. We show how to train the model using a
	combination of supervised and reinforcement learning. After teacher forcing for
	standard maximum likelihood training, we fine-tune the model using policy
	gradient techniques to maximize several rewards that measure question quality.
	Most notably, one of these rewards is the performance of a question-answering
	system. We motivate question generation as a means to improve the performance
	of question answering systems. Our model is trained and evaluated on the
	recent question-answering dataset SQuAD.},
}

