@InProceedings{adams-EtAl:2017:EACLlong,
  author    = {Adams, Oliver  and  Makarucha, Adam  and  Neubig, Graham  and  Bird, Steven  and  Cohn, Trevor},
  title     = {Cross-Lingual Word Embeddings for Low-Resource Language Modeling},
  booktitle = {Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers},
  month     = {April},
  year      = {2017},
  address   = {Valencia, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {937--947},
  abstract  = {Most languages have no established writing system and minimal
               written records. However, textual data is essential for natural
               language processing, and particularly important for training
               language models to support speech recognition. Even in cases
               where text data is missing, there are some languages for which
               bilingual lexicons are available, since creating lexicons is a
               fundamental task of documentary linguistics. We investigate the
               use of such lexicons to improve language models when textual
               training data is limited to as few as a thousand sentences. The
               method involves learning cross-lingual word embeddings as a
               preliminary step in training monolingual language models.
               Results across a number of languages show that language models
               are improved by this pre-training. Application to Yongning Na,
               a threatened language, highlights challenges in deploying the
               approach in real low-resource environments.},
  url       = {http://www.aclweb.org/anthology/E17-1088}
}