@InProceedings{fan-EtAl:2017:RepL4NLP,
  author    = {Fan, Xing  and  Monti, Emilio  and  Mathias, Lambert  and  Dreyer, Markus},
  title     = {Transfer Learning for Neural Semantic Parsing},
  booktitle = {Proceedings of the 2nd Workshop on Representation Learning for NLP},
  month     = {August},
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {48--56},
  abstract  = {The goal of semantic parsing is to map natural language to a machine
	interpretable meaning representation language (MRL). One of the constraints
	that limits full exploration of deep learning technologies for semantic parsing
	is the lack of sufficient annotated training data. In this paper, we propose
	using sequence-to-sequence models in a multi-task setup for semantic parsing,
	with a focus on transfer learning. We explore three multi-task architectures
	for the sequence-to-sequence model and compare their performance with that of
	an independently trained model. Our experiments show that the multi-task setup
	aids transfer learning from an auxiliary task with large labeled data to the
	target task with smaller labeled data. We see an absolute accuracy gain ranging
	from 1.0% to 4.4% on our in-house data set, and we also see good gains ranging
	from 2.5% to 7.0% on the ATIS semantic parsing tasks with syntactic and
	semantic auxiliary tasks.},
  url       = {http://www.aclweb.org/anthology/W17-2607}
}