@InProceedings{sato-EtAl:2017:K17-3,
  author    = {Sato, Motoki  and  Manabe, Hitoshi  and  Noji, Hiroshi  and  Matsumoto, Yuji},
  title     = {Adversarial Training for Cross-Domain {Universal Dependency} Parsing},
  booktitle = {Proceedings of the {CoNLL} 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {71--79},
  abstract  = {We describe our submission to the CoNLL 2017 shared task, which exploits the
               shared common knowledge of a language across different domains via a domain
               adaptation technique.
               Our approach is an extension to the recently proposed adversarial training
               technique for domain adaptation, which we apply on top of a graph-based neural
               dependency parsing model on bidirectional LSTMs.
               In our experiments, we find our baseline graph-based parser already outperforms
               the official baseline model (UDPipe) by a large margin.
               Further, by applying our technique to the treebanks of the same language with
               different domains, we observe an additional gain in the performance, in
               particular for the domains with less training data.},
  doi       = {10.18653/v1/K17-3007},
  url       = {https://aclanthology.org/K17-3007}
}

