@inproceedings{choi-EtAl:2017:Long,
  author    = {Choi, Eunsol and Hewlett, Daniel and Uszkoreit, Jakob and Polosukhin, Illia and Lacoste, Alexandre and Berant, Jonathan},
  title     = {Coarse-to-Fine Question Answering for Long Documents},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {209--220},
  doi       = {10.18653/v1/P17-1020},
  url       = {https://aclanthology.org/P17-1020},
  abstract  = {We present a framework for question answering that can efficiently scale to
	longer documents while maintaining or even improving performance of
	state-of-the-art models. While most successful approaches for reading
	comprehension rely on recurrent neural networks (RNNs), running them over long
	documents is prohibitively slow because it is difficult to parallelize over
	sequences. Inspired by how people first skim the document, identify relevant
	parts, and carefully read these parts to produce an answer, we combine a
	coarse, fast model for selecting relevant sentences and a more expensive RNN
	for producing the answer from those sentences.
	We treat sentence selection as a latent variable trained jointly from the
	answer only using reinforcement learning. Experiments demonstrate
	state-of-the-art performance on a challenging subset of the WikiReading dataset
	and on a new dataset, while speeding up the model by 3.5x--6.7x.},
}

