@inproceedings{popov:2017:RANLPStud,
  author    = {Popov, Alexander},
  title     = {Word Sense Disambiguation with Recurrent Neural Networks},
  booktitle = {Proceedings of the Student Research Workshop Associated with {RANLP} 2017},
  month     = sep,
  year      = {2017},
  address   = {Varna},
  publisher = {INCOMA Ltd.},
  pages     = {25--34},
  abstract  = {This paper presents a neural network architecture for word sense disambiguation
	(WSD). The architecture employs recurrent neural layers and more specifically
	LSTM cells, in order to capture information about word order and to easily
	incorporate distributed word representations (embeddings) as features, without
	having to use a fixed window of text. The paper demonstrates that the
	architecture is able to compete with the most successful supervised systems for
	WSD and that there is an abundance of possible improvements to take it to the
	current state of the art. In addition, it explores briefly the potential of
	combining different types of embeddings as input features; it also discusses
	possible ways for generating ``artificial corpora'' from knowledge bases -- for
	the purpose of producing training data and in relation to possible applications
	of embedding lemmas and word senses in the same space.},
  doi       = {10.26615/issn.1314-9156.2017_004},
}

