@inproceedings{malaviya-neubig-littell:2017:EMNLP2017,
  author    = {Malaviya, Chaitanya and Neubig, Graham and Littell, Patrick},
  title     = {Learning Language Representations for Typology Prediction},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {2529--2535},
  abstract  = {One central mystery of neural NLP is what neural models ``know'' about their
               subject matter. When a neural machine translation system learns to translate
               from one language to another, does it learn the syntax or semantics of the
               languages? Can this knowledge be extracted from the system to fill holes in
               human scientific knowledge? Existing typological databases contain relatively
               full feature specifications for only a few hundred languages. Exploiting the
               existence of parallel texts in more than a thousand languages, we build a
               massive many-to-one NMT system from 1017 languages into English, and use this
               to predict information missing from typological databases. Experiments show
               that the proposed method is able to infer not only syntactic, but also
               phonological and phonetic inventory features, and improves over a baseline that
               has access to information about the languages' geographic and phylogenetic
               neighbors.},
  url       = {https://www.aclweb.org/anthology/D17-1268},
}

