@inproceedings{misra-langford-artzi:2017:EMNLP2017,
  author    = {Misra, Dipendra and Langford, John and Artzi, Yoav},
  title     = {Mapping Instructions and Visual Observations to Actions with Reinforcement Learning},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {1004--1015},
  doi       = {10.18653/v1/D17-1106},
  url       = {https://www.aclweb.org/anthology/D17-1106},
  abstract  = {We propose to directly map raw visual observations and text input to actions
    for instruction execution. While existing approaches assume access to
    structured environment representations or use a pipeline of separately trained
    models, we learn a single model to jointly reason about linguistic and visual
    input. We use reinforcement learning in a contextual bandit setting to train a
    neural network agent. To guide the agent's exploration, we use reward shaping
    with different forms of supervision. Our approach does not require intermediate
    representations, planning procedures, or training different models. We evaluate
    in a simulated environment, and show significant improvements over supervised
    learning and common reinforcement learning variants.},
}

