@inproceedings{almeman-espinosa-anke-2022-putting,
title = "Putting {W}ord{N}et{'}s Dictionary Examples in the Context of Definition Modelling: An Empirical Analysis",
author = "Almeman, Fatemah and
Espinosa Anke, Luis",
editor = "Zock, Michael and
Chersoni, Emmanuele and
Hsu, Yu-Yin and
Santus, Enrico",
booktitle = "Proceedings of the Workshop on Cognitive Aspects of the Lexicon",
month = nov,
year = "2022",
address = "Taipei, Taiwan",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.cogalex-1.6",
doi = "10.18653/v1/2022.cogalex-1.6",
pages = "42--48",
    abstract = "Definition modeling is the task of generating a valid definition for a given input term. This relatively novel task has been approached either with no context (i.e., given a word embedding alone) or, more recently, as word-in-context modeling. Despite the success of these approaches, most works make little to no distinction between resources and their specific features (e.g., type and style of definitions, or quality of examples) when used for training. Given the high diversity that lexicographic resources exhibit in terms of topic coverage, style and formal structure, it is desirable for downstream definition modeling to understand which of them are better suited for the task. In this paper, we propose an empirical evaluation of the well-known lexical database WordNet and, specifically, its dictionary examples. We evaluate them both directly, by matching them against criteria for good dictionary writing, and indirectly, in the task of definition modeling. Our results suggest that WordNet{'}s dictionary examples could be improved by extending them in length and incorporating prototypicality.",
}
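The dictionary examples evaluated in this paper are the example sentences attached to WordNet synsets. As a minimal, illustrative sketch (not the authors' code), they can be inspected with NLTK's WordNet interface, assuming the `nltk` package and its `wordnet` corpus are installed:

```python
# Illustrative only: list WordNet glosses and their dictionary examples for a word.
# Assumes `pip install nltk` and `nltk.download('wordnet')` have been run.
from nltk.corpus import wordnet as wn

for synset in wn.synsets("bank"):
    print(f"{synset.name()}: {synset.definition()}")   # gloss (the definition)
    for example in synset.examples():                  # dictionary examples studied in the paper
        print(f"  example: {example}")
```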
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="almeman-espinosa-anke-2022-putting">
    <titleInfo>
      <title>Putting WordNet’s Dictionary Examples in the Context of Definition Modelling: An Empirical Analysis</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Fatemah</namePart>
      <namePart type="family">Almeman</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Luis</namePart>
      <namePart type="family">Espinosa Anke</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Workshop on Cognitive Aspects of the Lexicon</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Michael</namePart>
        <namePart type="family">Zock</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Emmanuele</namePart>
        <namePart type="family">Chersoni</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yu-Yin</namePart>
        <namePart type="family">Hsu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Enrico</namePart>
        <namePart type="family">Santus</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Taipei, Taiwan</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Definition modeling is the task of generating a valid definition for a given input term. This relatively novel task has been approached either with no context (i.e., given a word embedding alone) or, more recently, as word-in-context modeling. Despite the success of these approaches, most works make little to no distinction between resources and their specific features (e.g., type and style of definitions, or quality of examples) when used for training. Given the high diversity that lexicographic resources exhibit in terms of topic coverage, style and formal structure, it is desirable for downstream definition modeling to understand which of them are better suited for the task. In this paper, we propose an empirical evaluation of the well-known lexical database WordNet and, specifically, its dictionary examples. We evaluate them both directly, by matching them against criteria for good dictionary writing, and indirectly, in the task of definition modeling. Our results suggest that WordNet’s dictionary examples could be improved by extending them in length and incorporating prototypicality.</abstract>
    <identifier type="citekey">almeman-espinosa-anke-2022-putting</identifier>
    <identifier type="doi">10.18653/v1/2022.cogalex-1.6</identifier>
    <location>
      <url>https://aclanthology.org/2022.cogalex-1.6</url>
    </location>
    <part>
      <date>2022-11</date>
      <extent unit="page">
        <start>42</start>
        <end>48</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Putting WordNet’s Dictionary Examples in the Context of Definition Modelling: An Empirical Analysis
%A Almeman, Fatemah
%A Espinosa Anke, Luis
%Y Zock, Michael
%Y Chersoni, Emmanuele
%Y Hsu, Yu-Yin
%Y Santus, Enrico
%S Proceedings of the Workshop on Cognitive Aspects of the Lexicon
%D 2022
%8 November
%I Association for Computational Linguistics
%C Taipei, Taiwan
%F almeman-espinosa-anke-2022-putting
%X Definition modeling is the task of generating a valid definition for a given input term. This relatively novel task has been approached either with no context (i.e., given a word embedding alone) or, more recently, as word-in-context modeling. Despite the success of these approaches, most works make little to no distinction between resources and their specific features (e.g., type and style of definitions, or quality of examples) when used for training. Given the high diversity that lexicographic resources exhibit in terms of topic coverage, style and formal structure, it is desirable for downstream definition modeling to understand which of them are better suited for the task. In this paper, we propose an empirical evaluation of the well-known lexical database WordNet and, specifically, its dictionary examples. We evaluate them both directly, by matching them against criteria for good dictionary writing, and indirectly, in the task of definition modeling. Our results suggest that WordNet’s dictionary examples could be improved by extending them in length and incorporating prototypicality.
%R 10.18653/v1/2022.cogalex-1.6
%U https://aclanthology.org/2022.cogalex-1.6
%U https://doi.org/10.18653/v1/2022.cogalex-1.6
%P 42-48
Markdown (Informal)
[Putting WordNet’s Dictionary Examples in the Context of Definition Modelling: An Empirical Analysis](https://aclanthology.org/2022.cogalex-1.6) (Almeman & Espinosa Anke, CogALex 2022)