@inproceedings{khashabi-EtAl:2017:CoNLL,
  author    = {Khashabi, Daniel and Khot, Tushar and Sabharwal, Ashish and Roth, Dan},
  title     = {Learning What is Essential in Questions},
  booktitle = {Proceedings of the 21st Conference on Computational Natural Language Learning ({CoNLL} 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {80--89},
  abstract  = {Question answering (QA) systems are easily distracted by irrelevant or
    redundant words in questions, especially when faced with long or multi-sentence
    questions in difficult domains. This paper introduces and studies the notion of
    essential question terms with the goal of improving such QA solvers. We
    illustrate the importance of essential question terms by showing that humans'
    ability to answer questions drops significantly when essential terms are
    eliminated from questions. We then develop a classifier that reliably (90\%
    mean average precision) identifies and ranks essential terms in questions.
    Finally, we use the classifier to demonstrate that the notion of question term
    essentiality allows state-of-the-art QA solver for elementary-level science
    questions to make better and more informed decisions, improving performance by
    up to 5\%. We also introduce a new dataset of over 2,200 crowd-sourced
    essential terms annotated science questions.},
  doi       = {10.18653/v1/K17-1010},
  url       = {http://aclweb.org/anthology/K17-1010},
}

