@InProceedings{yavuz-EtAl:2017:EMNLP2017,
  author    = {Yavuz, Semih  and  Gur, Izzeddin  and  Su, Yu  and  Yan, Xifeng},
  title     = {Recovering Question Answering Errors via Query Revision},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {903--909},
  abstract  = {The existing factoid QA systems often
	lack a post-inspection component that can
	help models recover from their own mistakes.
	In this work, we propose to crosscheck
	the corresponding KB relations behind
	the predicted answers and identify
	potential inconsistencies. Instead of developing
	a new model that accepts evidences
	collected from these relations, we choose
	to plug them back to the original questions
	directly and check if the revised question
	makes sense or not. A bidirectional LSTM
	is applied to encode revised questions. We
	develop a scoring mechanism over the revised
	question encodings to refine the predictions
	of a base QA system. This approach
	can improve the F1 score of STAGG
	(Yih et al., 2015), one of the leading QA
	systems, from 52.5\% to 53.9\% on WEBQUESTIONS
	data.},
  doi       = {10.18653/v1/D17-1094},
  url       = {https://www.aclweb.org/anthology/D17-1094}
}

