@inproceedings{xie-xing:2017:Long,
  author    = {Xie, Pengtao and Xing, Eric},
  title     = {A Constituent-Centric Neural Architecture for Reading Comprehension},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {1405--1414},
  doi       = {10.18653/v1/P17-1129},
  abstract  = {Reading comprehension (RC), aiming to understand natural texts and answer
	questions therein, is a challenging task. In this paper, we study the RC
	problem on the Stanford Question Answering Dataset (SQuAD). Observing from the
	training set that most correct answers are centered around constituents in the
	parse tree, we design a constituent-centric neural architecture where the
	generation of candidate answers and their representation learning are both
	based on constituents and guided by the parse tree. Under this architecture,
	the search space of candidate answers can be greatly reduced without
	sacrificing the coverage of correct answers and the syntactic, hierarchical and
	compositional structure among constituents can be well captured, which
	contributes to better representation learning of the candidate answers. On
	SQuAD, our method achieves the state of the art performance and the ablation
	study corroborates the effectiveness of individual modules.},
  url       = {https://aclanthology.org/P17-1129},
}

