@inproceedings{lin-EtAl:2018:Long2,
  author    = {Lin, Ying and Yang, Shengqi and Stoyanov, Veselin and Ji, Heng},
  title     = {A Multi-lingual Multi-task Architecture for Low-resource Sequence Labeling},
  booktitle = {Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2018},
  address   = {Melbourne, Australia},
  publisher = {Association for Computational Linguistics},
  pages     = {799--809},
  abstract  = {We propose a multi-lingual multi-task architecture to develop supervised models with a minimal amount of labeled data for sequence labeling. In this new architecture, we combine various transfer models using two layers of parameter sharing. On the first layer, we construct the basis of the architecture to provide universal word representation and feature extraction capability for all models. On the second level, we adopt different parameter sharing strategies for different transfer schemes. This architecture proves to be particularly effective for low-resource settings, when there are less than 200 training sentences for the target task. Using Name Tagging as a target task, our approach achieved 4.3\%--50.5\% absolute F-score gains compared to the mono-lingual single-task baseline model.},
  doi       = {10.18653/v1/P18-1074},
  url       = {http://www.aclweb.org/anthology/P18-1074},
}

