@inproceedings{mancini-EtAl:2017:CoNLL,
  author    = {Mancini, Massimiliano and Camacho-Collados, Jose and Iacobacci, Ignacio and Navigli, Roberto},
  title     = {Embedding Words and Senses Together via Joint Knowledge-Enhanced Training},
  booktitle = {Proceedings of the 21st Conference on Computational Natural Language Learning ({CoNLL} 2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {100--111},
  doi       = {10.18653/v1/K17-1012},
  url       = {http://aclweb.org/anthology/K17-1012},
  abstract  = {Word embeddings are widely used in Natural Language Processing, mainly due to
               their success in capturing semantic information from massive corpora. However,
               their creation process does not allow the different meanings of a word to be
               automatically separated, as it conflates them into a single vector. We address
               this issue by proposing a new model which learns word and sense embeddings
               jointly. Our model exploits large corpora and knowledge from semantic networks
               in order to produce a unified vector space of word and sense embeddings. We
               evaluate the main features of our approach both qualitatively and
               quantitatively in a variety of tasks, highlighting the advantages of the
               proposed method in comparison to state-of-the-art word- and sense-based models.},
}

