@InProceedings{braud-plank-sogaard:2016:COLING,
  author    = {Braud, Chlo\'{e} and Plank, Barbara and S{\o}gaard, Anders},
  title     = {Multi-view and multi-task training of {RST} discourse parsers},
  booktitle = {Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
  month     = {December},
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {1903--1913},
  abstract  = {We experiment with different ways of training LSTM networks to predict RST
               discourse trees. The main challenge for RST discourse parsing is the limited
               amount of training data. We combat this by regularizing our models using task
               supervision from related tasks as well as alternative views on discourse
               structures. We show that a simple LSTM sequential discourse parser takes
               advantage of this multi-view and multi-task framework, with 12--15\% error
               reductions over our baseline (depending on the metric) and results that rival
               more complex state-of-the-art parsers.},
  url       = {http://aclweb.org/anthology/C16-1179}
}

