@inproceedings{tissier-gravier-habrard:2017:EMNLP2017,
  author    = {Tissier, Julien and Gravier, Christopher and Habrard, Amaury},
  title     = {{Dict2vec} : Learning Word Embeddings using Lexical Dictionaries},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {254--263},
  abstract  = {Learning word embeddings on large unlabeled corpus has been shown to be
	successful in improving many natural language tasks. The most efficient and
	popular approaches learn or retrofit such representations using additional
	external data. Resulting embeddings are generally better than their corpus-only
	counterparts, although such resources cover a fraction of words in the
	vocabulary. In this paper, we propose a new approach, Dict2vec, based on one of
	the largest yet refined datasource for describing words -- natural language
	dictionaries. Dict2vec builds new word pairs from dictionary entries so that
	semantically-related words are moved closer, and negative sampling filters out
	pairs whose words are unrelated in dictionaries. We evaluate the word
	representations obtained using Dict2vec on eleven datasets for the word
	similarity task and on four datasets for a text classification task.},
  doi       = {10.18653/v1/D17-1024},
  url       = {https://www.aclweb.org/anthology/D17-1024},
}

