@inproceedings{karamcheti-EtAl:2017:RoboNLP,
  author    = {Karamcheti, Siddharth and Williams, Edward Clem and Arumugam, Dilip and Rhee, Mina and Gopalan, Nakul and Wong, Lawson L. S. and Tellex, Stefanie},
  title     = {A Tale of Two {DRAGGNs}: A Hybrid Approach for Interpreting Action-Oriented and Goal-Oriented Instructions},
  booktitle = {Proceedings of the First Workshop on Language Grounding for Robotics},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {67--75},
  doi       = {10.18653/v1/W17-2809},
  url       = {http://www.aclweb.org/anthology/W17-2809},
  abstract  = {Robots operating alongside humans in diverse, stochastic environments must be
    able to accurately interpret natural language commands. These instructions
    often fall into one of two categories: those that specify a goal condition or
    target state, and those that specify explicit actions, or how to perform a
    given task. Recent approaches have used reward functions as a semantic
    representation of goal-based commands, which allows for the use of a
    state-of-the-art planner to find a policy for the given task. However, these
    reward functions cannot be directly used to represent action-oriented commands.
    We introduce a new hybrid approach, the Deep Recurrent Action-Goal Grounding
    Network (DRAGGN), for task grounding and execution that handles natural
    language from either category as input, and generalizes to unseen environments.
    Our robot-simulation results demonstrate that a system successfully
    interpreting both goal-oriented and action-oriented task specifications brings
    us closer to robust natural language understanding for human-robot interaction.},
}

