@inproceedings{nogueira-cho:2017:EMNLP2017,
  author    = {Nogueira, Rodrigo  and  Cho, Kyunghyun},
  title     = {Task-Oriented Query Reformulation with Reinforcement Learning},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {574--583},
  abstract  = {Search engines play an important role in our everyday lives by assisting us in
	finding the information we need. When we input a complex query, however,
	results are often far from satisfactory. In this work, we introduce a query
	reformulation system based on a neural network that rewrites a query to
	maximize the number of relevant documents returned.
	We train this neural network with reinforcement learning. The actions
	correspond to selecting terms to build a reformulated query, and the reward is
	the document recall. We evaluate our approach on three datasets against strong
	baselines and show a relative improvement of 5--20\% in terms of recall.
	Furthermore, we present a simple method to estimate a conservative upper-bound
	performance of a model in a particular environment and verify that there is
	still large room for improvements.},
  doi       = {10.18653/v1/D17-1061},
  url       = {https://www.aclweb.org/anthology/D17-1061},
}

