@inproceedings{popov-2017-word,
  title     = {Word Sense Disambiguation with Recurrent Neural Networks},
  author    = {Popov, Alexander},
  editor    = {Kovatchev, Venelin and
               Temnikova, Irina and
               Gencheva, Pepa and
               Kiprov, Yasen and
               Nikolova, Ivelina},
  booktitle = {Proceedings of the Student Research Workshop Associated with {RANLP} 2017},
  month     = sep,
  year      = {2017},
  address   = {Varna},
  publisher = {INCOMA Ltd.},
  doi       = {10.26615/issn.1314-9156.2017_004},
  pages     = {25--34},
  abstract  = {This paper presents a neural network architecture for word sense disambiguation (WSD). The architecture employs recurrent neural layers and more specifically LSTM cells, in order to capture information about word order and to easily incorporate distributed word representations (embeddings) as features, without having to use a fixed window of text. The paper demonstrates that the architecture is able to compete with the most successful supervised systems for WSD and that there is an abundance of possible improvements to take it to the current state of the art. In addition, it explores briefly the potential of combining different types of embeddings as input features; it also discusses possible ways for generating {``}artificial corpora{''} from knowledge bases {--} for the purpose of producing training data and in relation to possible applications of embedding lemmas and word senses in the same space.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="popov-2017-word">
<titleInfo>
<title>Word Sense Disambiguation with Recurrent Neural Networks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexander</namePart>
<namePart type="family">Popov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Student Research Workshop Associated with RANLP 2017</title>
</titleInfo>
<name type="personal">
<namePart type="given">Venelin</namePart>
<namePart type="family">Kovatchev</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Irina</namePart>
<namePart type="family">Temnikova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pepa</namePart>
<namePart type="family">Gencheva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yasen</namePart>
<namePart type="family">Kiprov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivelina</namePart>
<namePart type="family">Nikolova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd.</publisher>
<place>
<placeTerm type="text">Varna</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper presents a neural network architecture for word sense disambiguation (WSD). The architecture employs recurrent neural layers and more specifically LSTM cells, in order to capture information about word order and to easily incorporate distributed word representations (embeddings) as features, without having to use a fixed window of text. The paper demonstrates that the architecture is able to compete with the most successful supervised systems for WSD and that there is an abundance of possible improvements to take it to the current state of the art. In addition, it explores briefly the potential of combining different types of embeddings as input features; it also discusses possible ways for generating “artificial corpora” from knowledge bases – for the purpose of producing training data and in relation to possible applications of embedding lemmas and word senses in the same space.</abstract>
<identifier type="citekey">popov-2017-word</identifier>
<identifier type="doi">10.26615/issn.1314-9156.2017_004</identifier>
<part>
<date>2017-09</date>
<extent unit="page">
<start>25</start>
<end>34</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Word Sense Disambiguation with Recurrent Neural Networks
%A Popov, Alexander
%Y Kovatchev, Venelin
%Y Temnikova, Irina
%Y Gencheva, Pepa
%Y Kiprov, Yasen
%Y Nikolova, Ivelina
%S Proceedings of the Student Research Workshop Associated with RANLP 2017
%D 2017
%8 September
%I INCOMA Ltd.
%C Varna
%F popov-2017-word
%X This paper presents a neural network architecture for word sense disambiguation (WSD). The architecture employs recurrent neural layers and more specifically LSTM cells, in order to capture information about word order and to easily incorporate distributed word representations (embeddings) as features, without having to use a fixed window of text. The paper demonstrates that the architecture is able to compete with the most successful supervised systems for WSD and that there is an abundance of possible improvements to take it to the current state of the art. In addition, it explores briefly the potential of combining different types of embeddings as input features; it also discusses possible ways for generating “artificial corpora” from knowledge bases – for the purpose of producing training data and in relation to possible applications of embedding lemmas and word senses in the same space.
%R 10.26615/issn.1314-9156.2017_004
%U https://doi.org/10.26615/issn.1314-9156.2017_004
%P 25-34
Markdown (Informal)
[Word Sense Disambiguation with Recurrent Neural Networks](https://doi.org/10.26615/issn.1314-9156.2017_004) (Popov, RANLP 2017)
ACL