@inproceedings{zhou-bollegala-2022-curious,
    title = "On the Curious Case of l2 norm of Sense Embeddings",
    author = "Zhou, Yi  and
      Bollegala, Danushka",
    editor = "Goldberg, Yoav  and
      Kozareva, Zornitsa  and
      Zhang, Yue",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
    month = dec,
    year = "2022",
    address = "Abu Dhabi, United Arab Emirates",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.190",
    doi = "10.18653/v1/2022.findings-emnlp.190",
    pages = "2593--2602",
    abstract = "We show that the l2 norm of a static sense embedding encodes information related to the frequency of that sense in the training corpus used to learn the sense embeddings. This finding can be seen as an extension of a previously known relationship for word embeddings to sense embeddings. Our experimental results show that in spite of its simplicity, the l2 norm of sense embeddings is a surprisingly effective feature for several word sense related tasks such as (a) most frequent sense prediction, (b) word-in-context (WiC), and (c) word sense disambiguation (WSD). In particular, by simply including the l2 norm of a sense embedding as a feature in a classifier, we show that we can improve WiC and WSD methods that use static sense embeddings.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="zhou-bollegala-2022-curious">
    <titleInfo>
      <title>On the Curious Case of l2 norm of Sense Embeddings</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yi</namePart>
      <namePart type="family">Zhou</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Danushka</namePart>
      <namePart type="family">Bollegala</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2022</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yoav</namePart>
        <namePart type="family">Goldberg</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Zornitsa</namePart>
        <namePart type="family">Kozareva</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yue</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We show that the l2 norm of a static sense embedding encodes information related to the frequency of that sense in the training corpus used to learn the sense embeddings. This finding can be seen as an extension of a previously known relationship for word embeddings to sense embeddings. Our experimental results show that in spite of its simplicity, the l2 norm of sense embeddings is a surprisingly effective feature for several word sense related tasks such as (a) most frequent sense prediction, (b) word-in-context (WiC), and (c) word sense disambiguation (WSD). In particular, by simply including the l2 norm of a sense embedding as a feature in a classifier, we show that we can improve WiC and WSD methods that use static sense embeddings.</abstract>
    <identifier type="citekey">zhou-bollegala-2022-curious</identifier>
    <identifier type="doi">10.18653/v1/2022.findings-emnlp.190</identifier>
    <location>
      <url>https://aclanthology.org/2022.findings-emnlp.190</url>
    </location>
    <part>
      <date>2022-12</date>
      <extent unit="page">
        <start>2593</start>
        <end>2602</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T On the Curious Case of l2 norm of Sense Embeddings
%A Zhou, Yi
%A Bollegala, Danushka
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Findings of the Association for Computational Linguistics: EMNLP 2022
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F zhou-bollegala-2022-curious
%X We show that the l2 norm of a static sense embedding encodes information related to the frequency of that sense in the training corpus used to learn the sense embeddings. This finding can be seen as an extension of a previously known relationship for word embeddings to sense embeddings. Our experimental results show that in spite of its simplicity, the l2 norm of sense embeddings is a surprisingly effective feature for several word sense related tasks such as (a) most frequent sense prediction, (b) word-in-context (WiC), and (c) word sense disambiguation (WSD). In particular, by simply including the l2 norm of a sense embedding as a feature in a classifier, we show that we can improve WiC and WSD methods that use static sense embeddings.
%R 10.18653/v1/2022.findings-emnlp.190
%U https://aclanthology.org/2022.findings-emnlp.190
%U https://doi.org/10.18653/v1/2022.findings-emnlp.190
%P 2593-2602
Markdown (Informal)
[On the Curious Case of l2 norm of Sense Embeddings](https://aclanthology.org/2022.findings-emnlp.190) (Zhou & Bollegala, Findings 2022)
ACL
Yi Zhou and Danushka Bollegala. 2022. On the Curious Case of l2 norm of Sense Embeddings. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 2593–2602, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
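The abstract above describes a simple recipe: append the l2 norm of a static sense embedding as an extra feature in a classifier for word-sense tasks such as WiC. The sketch below only illustrates that feature construction; it is not the authors' implementation. The sense embeddings, the WiC-style pairs, the labels, and the logistic-regression classifier are all placeholder assumptions made for this example.

```python
# Minimal sketch: adding the l2 norm of sense embeddings as an extra
# classifier feature for a WiC-style binary task.
# NOTE: the embeddings and labels below are random placeholders, not the
# paper's actual sense embeddings or the real WiC dataset.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)

# Pretend lookup table of static sense embeddings: sense id -> 300-d vector.
num_senses, dim = 1000, 300
sense_embeddings = rng.normal(size=(num_senses, dim))

def pair_features(s1, s2, use_norm=True):
    """Feature vector for a pair of sense ids: the two embeddings
    concatenated, optionally augmented with each embedding's l2 norm."""
    e1, e2 = sense_embeddings[s1], sense_embeddings[s2]
    feats = [e1, e2]
    if use_norm:
        feats.append([np.linalg.norm(e1), np.linalg.norm(e2)])
    return np.concatenate(feats)

# Random WiC-style pairs with binary labels (same sense vs. different sense).
n_pairs = 2000
pairs = rng.integers(0, num_senses, size=(n_pairs, 2))
labels = rng.integers(0, 2, size=n_pairs)

for use_norm in (False, True):
    X = np.stack([pair_features(a, b, use_norm) for a, b in pairs])
    X_tr, X_te, y_tr, y_te = train_test_split(X, labels, random_state=0)
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    print(f"l2-norm feature included = {use_norm}: "
          f"test accuracy = {clf.score(X_te, y_te):.3f}")
```

With random placeholder data both runs score near chance; the sketch only shows where the norm feature is concatenated, which is the step the paper reports as improving WiC and WSD when real static sense embeddings are used.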