@InProceedings{rutherford-demberg-xue:2017:EACLlong,
  author    = {Rutherford, Attapol  and  Demberg, Vera  and  Xue, Nianwen},
  title     = {A Systematic Study of Neural Discourse Models for Implicit Discourse Relation},
  booktitle = {Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers},
  month     = {April},
  year      = {2017},
  address   = {Valencia, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {281--291},
  abstract  = {Inferring implicit discourse relations in natural language text is the most
	difficult subtask in discourse parsing. Many neural network models have been
	proposed to tackle this problem. However, comparisons for this task are not
	unified, so it is hard to draw clear conclusions about the effectiveness of
	various architectures. Here, we propose neural network models based on
	feedforward and long short-term memory (LSTM) architectures and systematically
	study the effects of varying structures. To our surprise, the best-configured
	feedforward architecture outperforms LSTM-based models in most cases despite
	thorough tuning. Further, we compare our best feedforward system with
	competitive convolutional and recurrent networks and find that the feedforward
	architecture can actually be more effective. For the first time for this task,
	we compile and publish the outputs of previous neural and non-neural systems to
	establish a standard for further comparison.},
  url       = {http://www.aclweb.org/anthology/E17-1027}
}

