@inproceedings{srinivasan-etal-2019-integrating,
title = "Integrating Lexical Knowledge in Word Embeddings using Sprinkling and Retrofitting",
author = "Srinivasan, Aakash and
Kamarthi, Harshavardhan and
Ganesan, Devi and
Chakraborti, Sutanu",
editor = "Sharma, Dipti Misra and
Bhattacharya, Pushpak",
booktitle = "Proceedings of the 16th International Conference on Natural Language Processing",
month = dec,
year = "2019",
address = "International Institute of Information Technology, Hyderabad, India",
publisher = "NLP Association of India",
url = "https://aclanthology.org/2019.icon-1.13",
pages = "115--123",
abstract = "Neural network based word embeddings, such as Word2Vec and Glove, are purely data driven in that they capture the distributional information about words from the training corpus. Past works have attempted to improve these embeddings by incorporating semantic knowledge from lexical resources like WordNet. Some techniques like retrofitting modify word embeddings in the post-processing stage while some others use a joint learning approach by modifying the objective function of neural networks. In this paper, we discuss two novel approaches for incorporating semantic knowledge into word embeddings. In the first approach, we take advantage of Levy et al{'}s work which showed that using SVD based methods on co-occurrence matrix provide similar performance to neural network based embeddings. We propose a {`}sprinkling{'} technique to add semantic relations to the co-occurrence matrix directly before factorization. In the second approach, WordNet similarity scores are used to improve the retrofitting method. We evaluate the proposed methods in both intrinsic and extrinsic tasks and observe significant improvements over the baselines in many of the datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="srinivasan-etal-2019-integrating">
    <titleInfo>
      <title>Integrating Lexical Knowledge in Word Embeddings using Sprinkling and Retrofitting</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Aakash</namePart>
      <namePart type="family">Srinivasan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Harshavardhan</namePart>
      <namePart type="family">Kamarthi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Devi</namePart>
      <namePart type="family">Ganesan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sutanu</namePart>
      <namePart type="family">Chakraborti</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 16th International Conference on Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Dipti</namePart>
        <namePart type="given">Misra</namePart>
        <namePart type="family">Sharma</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Pushpak</namePart>
        <namePart type="family">Bhattacharya</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>NLP Association of India</publisher>
        <place>
          <placeTerm type="text">International Institute of Information Technology, Hyderabad, India</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Neural network based word embeddings, such as Word2Vec and GloVe, are purely data-driven in that they capture the distributional information about words from the training corpus. Past works have attempted to improve these embeddings by incorporating semantic knowledge from lexical resources like WordNet. Some techniques, like retrofitting, modify word embeddings in a post-processing stage, while others use a joint learning approach that modifies the objective function of the neural network. In this paper, we discuss two novel approaches for incorporating semantic knowledge into word embeddings. In the first approach, we build on Levy et al.’s work, which showed that SVD-based methods on the co-occurrence matrix provide performance similar to neural network based embeddings. We propose a ‘sprinkling’ technique that adds semantic relations to the co-occurrence matrix directly before factorization. In the second approach, WordNet similarity scores are used to improve the retrofitting method. We evaluate the proposed methods on both intrinsic and extrinsic tasks and observe significant improvements over the baselines on many of the datasets.</abstract>
    <identifier type="citekey">srinivasan-etal-2019-integrating</identifier>
    <location>
      <url>https://aclanthology.org/2019.icon-1.13</url>
    </location>
    <part>
      <date>2019-12</date>
      <extent unit="page">
        <start>115</start>
        <end>123</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Integrating Lexical Knowledge in Word Embeddings using Sprinkling and Retrofitting
%A Srinivasan, Aakash
%A Kamarthi, Harshavardhan
%A Ganesan, Devi
%A Chakraborti, Sutanu
%Y Sharma, Dipti Misra
%Y Bhattacharya, Pushpak
%S Proceedings of the 16th International Conference on Natural Language Processing
%D 2019
%8 December
%I NLP Association of India
%C International Institute of Information Technology, Hyderabad, India
%F srinivasan-etal-2019-integrating
%X Neural network based word embeddings, such as Word2Vec and GloVe, are purely data-driven in that they capture the distributional information about words from the training corpus. Past works have attempted to improve these embeddings by incorporating semantic knowledge from lexical resources like WordNet. Some techniques, like retrofitting, modify word embeddings in a post-processing stage, while others use a joint learning approach that modifies the objective function of the neural network. In this paper, we discuss two novel approaches for incorporating semantic knowledge into word embeddings. In the first approach, we build on Levy et al.’s work, which showed that SVD-based methods on the co-occurrence matrix provide performance similar to neural network based embeddings. We propose a ‘sprinkling’ technique that adds semantic relations to the co-occurrence matrix directly before factorization. In the second approach, WordNet similarity scores are used to improve the retrofitting method. We evaluate the proposed methods on both intrinsic and extrinsic tasks and observe significant improvements over the baselines on many of the datasets.
%U https://aclanthology.org/2019.icon-1.13
%P 115-123
Markdown (Informal)
[Integrating Lexical Knowledge in Word Embeddings using Sprinkling and Retrofitting](https://aclanthology.org/2019.icon-1.13) (Srinivasan et al., ICON 2019)
ACL
Aakash Srinivasan, Harshavardhan Kamarthi, Devi Ganesan, and Sutanu Chakraborti. 2019. Integrating Lexical Knowledge in Word Embeddings using Sprinkling and Retrofitting. In Proceedings of the 16th International Conference on Natural Language Processing, pages 115–123, International Institute of Information Technology, Hyderabad, India. NLP Association of India.
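
The abstract summarizes two techniques. Below is a minimal sketch of the first, ‘sprinkling’: extra columns encoding lexical relations are appended to the word-word co-occurrence matrix before SVD factorization. This is an illustrative reconstruction from the abstract, not the authors’ implementation; the function name, the one-column-per-relation encoding, and the PPMI-style input are assumptions.

```python
# Sketch of "sprinkling": append columns encoding lexical relations to the
# co-occurrence matrix, then factorize with truncated SVD. Assumptions: a
# dense (V, V) co-occurrence/PPMI matrix and a list of related word pairs.
import numpy as np

def sprinkle_and_factorize(cooc, relation_pairs, vocab_index, dim=50, weight=1.0):
    """Return (V, dim) embeddings from SVD of the sprinkled matrix.

    cooc: (V, V) word-word co-occurrence (or PPMI) matrix.
    relation_pairs: list of (word_a, word_b) pairs, e.g. WordNet synonyms.
    vocab_index: dict mapping word -> row index of cooc.
    """
    V = cooc.shape[0]
    # One sprinkled column per relation pair: both members get a nonzero
    # entry, so related words agree on an extra dimension before SVD.
    extra = np.zeros((V, len(relation_pairs)))
    for k, (a, b) in enumerate(relation_pairs):
        if a in vocab_index and b in vocab_index:
            extra[vocab_index[a], k] = weight
            extra[vocab_index[b], k] = weight
    sprinkled = np.hstack([cooc, extra])  # (V, V + num_relations)
    U, S, _ = np.linalg.svd(sprinkled, full_matrices=False)
    return U[:, :dim] * S[:dim]  # scaled left singular vectors as embeddings
```

With `weight=0` the extra columns vanish and this reduces to plain SVD of the co-occurrence matrix, i.e. the Levy et al. style baseline the abstract refers to.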
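
The second technique modifies retrofitting (Faruqui et al., 2015), which iteratively pulls each vector toward its lexical neighbors while keeping it close to its original value. In the sketch below the neighbor weights come from a WordNet similarity function, which is the change the abstract describes; the specific similarity measure (e.g. Wu-Palmer) and the parameter names are assumptions for illustration.

```python
# Sketch of similarity-weighted retrofitting. Standard retrofitting updates
#   q_i <- (beta * q_i_orig + sum_j alpha_ij * q_j) / (beta + sum_j alpha_ij)
# with uniform alpha_ij; here alpha_ij is a WordNet similarity score instead.
import numpy as np

def retrofit(embeddings, neighbors, sim, iters=10, beta=1.0):
    """embeddings: dict word -> np.ndarray (original vectors, kept fixed).
    neighbors: dict word -> list of lexically related words (e.g. synonyms).
    sim: callable (word, word) -> float, e.g. Wu-Palmer similarity on WordNet.
    """
    new = {w: v.copy() for w, v in embeddings.items()}
    for _ in range(iters):
        for w, related in neighbors.items():
            if w not in new:
                continue
            related = [r for r in related if r in new]
            if not related:
                continue
            # Similarity-weighted average of neighbors, plus a pull back
            # toward the original (distributional) vector.
            alphas = np.array([sim(w, r) for r in related])
            total = beta * embeddings[w] + sum(a * new[r] for a, r in zip(alphas, related))
            new[w] = total / (beta + alphas.sum())
    return new
```

Setting every alpha to 1 recovers the standard retrofitting update, so the WordNet scores act as a soft filter on how strongly each lexical neighbor influences a word.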