@InProceedings{lin-EtAl:2018:LOUHI,
  author    = {Lin, Chen  and  Miller, Timothy  and  Dligach, Dmitriy  and  Amiri, Hadi  and  Bethard, Steven  and  Savova, Guergana},
  title     = {Self-training Improves {Recurrent Neural Networks} Performance for {Temporal Relation Extraction}},
  booktitle = {Proceedings of the Ninth International Workshop on Health Text Mining and Information Analysis},
  month     = oct,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  pages     = {165--176},
  abstract  = {Neural network models are oftentimes restricted by limited labeled instances and resort to advanced architectures and features for cutting edge performance. We propose to build a recurrent neural network with multiple semantically heterogeneous embeddings within a self-training framework. Our framework makes use of labeled, unlabeled, and social media data, operates on basic features, and is scalable and generalizable. With this method, we establish the state-of-the-art result for both in- and cross-domain for a clinical temporal relation extraction task.},
  url       = {https://www.aclweb.org/anthology/W18-5619}
}

