@inproceedings{papay-pad-vu:2018:W18-12,
  author    = {Papay, Sean and Pad{\'o}, Sebastian and Vu, Ngoc Thang},
  title     = {Addressing Low-Resource Scenarios with Character-aware Embeddings},
  booktitle = {Proceedings of the Second Workshop on Subword/Character {LEvel} Models},
  month     = jun,
  year      = {2018},
  address   = {New Orleans},
  publisher = {Association for Computational Linguistics},
  pages     = {32--37},
  abstract  = {Most modern approaches to computing word embeddings assume the availability of text corpora with billions of words. In this paper, we explore a setup where only corpora with millions of words are available, and many words in any new text are out of vocabulary. This setup is both of practical interests -- modeling the situation for specific domains and low-resource languages -- and of psycholinguistic interest, since it corresponds much more closely to the actual experiences and challenges of human language learning and use. We compare standard skip-gram word embeddings with character-based embeddings on word relatedness prediction. Skip-grams excel on large corpora, while character-based embeddings do well on small corpora generally and rare and complex words specifically. The models can be combined easily.},
  url       = {http://www.aclweb.org/anthology/W18-1204},
}

