@inproceedings{liang-EtAl:2017:Long,
  author    = {Liang, Chen and Berant, Jonathan and Le, Quoc and Forbus, Kenneth D. and Lao, Ni},
  title     = {{Neural Symbolic Machines}: Learning Semantic Parsers on {Freebase} with Weak Supervision},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {23--33},
  doi       = {10.18653/v1/P17-1003},
  url       = {https://aclanthology.org/P17-1003},
  abstract  = {Harnessing the statistical power of neural networks to perform language
	understanding and symbolic reasoning is difficult, when it requires executing
	efficient discrete operations against a large knowledge-base. In this work, we
	introduce a Neural Symbolic Machine, which contains (a) a neural ``programmer'',
	i.e., a sequence-to-sequence model that maps language utterances to programs
	and utilizes a key-variable memory to handle compositionality (b) a symbolic
	``computer'', i.e., a Lisp interpreter that performs program execution, and helps
	find good programs by pruning the search space. We apply REINFORCE to directly
	optimize the task reward of this structured prediction problem. To train with
	weak supervision and improve the stability of REINFORCE, we augment it with an
	iterative maximum-likelihood training process. NSM outperforms the
	state-of-the-art on the WebQuestionsSP dataset when trained from
	question-answer pairs only, without requiring any feature engineering or
	domain-specific knowledge.},
}

