@inproceedings{le-fokkens:2017:EACLlong,
  author    = {Le, Minh and Fokkens, Antske},
  title     = {Tackling Error Propagation through Reinforcement Learning: A Case of Greedy Dependency Parsing},
  booktitle = {Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers},
  month     = apr,
  year      = {2017},
  address   = {Valencia, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {677--687},
  abstract  = {Error propagation is a common problem in NLP. Reinforcement learning explores
	erroneous states during training and can therefore be more robust when mistakes
	are made early in a process. In this paper, we apply reinforcement learning to
	greedy dependency parsing which is known to suffer from error propagation.
	Reinforcement learning improves accuracy of both labeled and unlabeled
	dependencies of the Stanford Neural Dependency Parser, a high performance
	greedy parser, while maintaining its efficiency. We investigate the portion of
	errors which are the result of error propagation and confirm that reinforcement
	learning reduces the occurrence of error propagation.},
  url       = {http://www.aclweb.org/anthology/E17-1064},
}

