@inproceedings{lekkas-etal-2022-multi,
title = "Multi-sense Language Modelling",
author = "Lekkas, Andrea and
Schneider-Kamp, Peter and
Augenstein, Isabelle",
editor = "Baker, Collin F.",
booktitle = "Proceedings of the Workshop on Dimensions of Meaning: Distributional and Curated Semantics (DistCurate 2022)",
month = jul,
year = "2022",
address = "Seattle, Washington",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.distcurate-1.2",
doi = "10.18653/v1/2022.distcurate-1.2",
pages = "8--18",
abstract = "The effectiveness of a language model is influenced by its token representations, which must encode contextual information and handle the same word form having a plurality of meanings (polysemy). Currently, none of the common language modelling architectures explicitly model polysemy. We propose a language model which not only predicts the next word, but also its sense in context. We argue that this higher prediction granularity may be useful for end tasks such as assistive writing, and allow for more a precise linking of language models with knowledge bases. We find that multi-sense language modelling requires architectures that go beyond standard language models, and here propose a localized prediction framework that decomposes the task into a word followed by a sense prediction task. To aid sense prediction, we utilise a Graph Attention Network, which encodes definitions and example uses of word senses. Overall, we find that multi-sense language modelling is a highly challenging task, and suggest that future work focus on the creation of more annotated training datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lekkas-etal-2022-multi">
<titleInfo>
<title>Multi-sense Language Modelling</title>
</titleInfo>
<name type="personal">
<namePart type="given">Andrea</namePart>
<namePart type="family">Lekkas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Peter</namePart>
<namePart type="family">Schneider-Kamp</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isabelle</namePart>
<namePart type="family">Augenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Workshop on Dimensions of Meaning: Distributional and Curated Semantics (DistCurate 2022)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Collin</namePart>
<namePart type="given">F</namePart>
<namePart type="family">Baker</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Seattle, Washington</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The effectiveness of a language model is influenced by its token representations, which must encode contextual information and handle the same word form having a plurality of meanings (polysemy). Currently, none of the common language modelling architectures explicitly model polysemy. We propose a language model which not only predicts the next word, but also its sense in context. We argue that this higher prediction granularity may be useful for end tasks such as assistive writing, and allow for more a precise linking of language models with knowledge bases. We find that multi-sense language modelling requires architectures that go beyond standard language models, and here propose a localized prediction framework that decomposes the task into a word followed by a sense prediction task. To aid sense prediction, we utilise a Graph Attention Network, which encodes definitions and example uses of word senses. Overall, we find that multi-sense language modelling is a highly challenging task, and suggest that future work focus on the creation of more annotated training datasets.</abstract>
<identifier type="citekey">lekkas-etal-2022-multi</identifier>
<identifier type="doi">10.18653/v1/2022.distcurate-1.2</identifier>
<location>
<url>https://aclanthology.org/2022.distcurate-1.2</url>
</location>
<part>
<date>2022-07</date>
<extent unit="page">
<start>8</start>
<end>18</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multi-sense Language Modelling
%A Lekkas, Andrea
%A Schneider-Kamp, Peter
%A Augenstein, Isabelle
%Y Baker, Collin F.
%S Proceedings of the Workshop on Dimensions of Meaning: Distributional and Curated Semantics (DistCurate 2022)
%D 2022
%8 July
%I Association for Computational Linguistics
%C Seattle, Washington
%F lekkas-etal-2022-multi
%X The effectiveness of a language model is influenced by its token representations, which must encode contextual information and handle the same word form having a plurality of meanings (polysemy). Currently, none of the common language modelling architectures explicitly model polysemy. We propose a language model which not only predicts the next word, but also its sense in context. We argue that this higher prediction granularity may be useful for end tasks such as assistive writing, and allow for a more precise linking of language models with knowledge bases. We find that multi-sense language modelling requires architectures that go beyond standard language models, and here propose a localized prediction framework that decomposes the task into a word prediction followed by a sense prediction task. To aid sense prediction, we utilise a Graph Attention Network, which encodes definitions and example uses of word senses. Overall, we find that multi-sense language modelling is a highly challenging task, and suggest that future work focus on the creation of more annotated training datasets.
%R 10.18653/v1/2022.distcurate-1.2
%U https://aclanthology.org/2022.distcurate-1.2
%U https://doi.org/10.18653/v1/2022.distcurate-1.2
%P 8-18
Markdown (Informal)
[Multi-sense Language Modelling](https://aclanthology.org/2022.distcurate-1.2) (Lekkas et al., DistCurate 2022)
ACL
Andrea Lekkas, Peter Schneider-Kamp, and Isabelle Augenstein. 2022. Multi-sense Language Modelling. In Proceedings of the Workshop on Dimensions of Meaning: Distributional and Curated Semantics (DistCurate 2022), pages 8–18, Seattle, Washington. Association for Computational Linguistics.
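
The abstract describes a localized, two-step prediction scheme: predict the next word first, then its sense in context. The toy sketch below only illustrates that word-then-sense decomposition; the GRU encoder, the global sense head, the class and parameter names, and all dimensions are assumptions of this sketch rather than the paper's architecture, and the Graph Attention Network over sense definitions and example uses is omitted entirely.

```python
# Minimal, hypothetical sketch of a word-then-sense prediction step.
# Not the authors' implementation; names and dimensions are illustrative.
import torch
import torch.nn as nn


class TwoStepSenseLM(nn.Module):
    def __init__(self, vocab_size: int, sense_vocab_size: int, hidden_dim: int = 256):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden_dim)
        self.encoder = nn.GRU(hidden_dim, hidden_dim, batch_first=True)
        self.word_head = nn.Linear(hidden_dim, vocab_size)              # step 1: next word
        self.sense_head = nn.Linear(2 * hidden_dim, sense_vocab_size)   # step 2: its sense

    def forward(self, token_ids: torch.Tensor):
        h, _ = self.encoder(self.embed(token_ids))        # (batch, seq, hidden)
        word_logits = self.word_head(h)                   # predict the next word
        predicted = word_logits.argmax(dim=-1)            # greedy word choice
        # condition sense prediction on both the context state and the predicted word
        sense_input = torch.cat([h, self.embed(predicted)], dim=-1)
        sense_logits = self.sense_head(sense_input)
        return word_logits, sense_logits


# toy usage with random token ids
model = TwoStepSenseLM(vocab_size=1000, sense_vocab_size=5000)
ids = torch.randint(0, 1000, (2, 7))
word_logits, sense_logits = model(ids)
print(word_logits.shape, sense_logits.shape)  # (2, 7, 1000) (2, 7, 5000)
```

In the paper's framing, the sense prediction step would be restricted to the senses of the predicted word and informed by gloss and example-use encodings; the global sense head above is only a stand-in for that second step.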