@inproceedings{shi-EtAl:2017:I17-1,
  author    = {Shi, Wei and Yung, Frances and Rubino, Raphael and Demberg, Vera},
  title     = {Using Explicit Discourse Connectives in Translation for Implicit Discourse Relation Classification},
  booktitle = {Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
  month     = nov,
  year      = {2017},
  address   = {Taipei, Taiwan},
  publisher = {Asian Federation of Natural Language Processing},
  pages     = {484--495},
  abstract  = {Implicit discourse relation recognition is an extremely challenging task due to
               the lack of indicative connectives. Various neural network architectures have
               been proposed for this task recently, but most of them suffer from the shortage
               of labeled data. In this paper, we address this problem by procuring additional
               training data from parallel corpora: When humans translate a text, they
               sometimes add connectives (a process known as \textit{explicitation}). We
               automatically back-translate it into an English connective and use it to infer
               a label with high confidence. We show that a training set several times larger
               than the original training set can be generated this way. With the extra
               labeled instances, we show that even a simple bidirectional Long Short-Term
               Memory Network can outperform the current state-of-the-art.},
  url       = {http://www.aclweb.org/anthology/I17-1049},
}

