@inproceedings{janzso-2021-disambiguating-grammatical,
title = "Disambiguating Grammatical Number and Gender With {BERT}",
author = "Janzso, Annegret",
editor = "Djabri, Souhila and
Gimadi, Dinara and
Mihaylova, Tsvetomila and
Nikolova-Koleva, Ivelina",
booktitle = "Proceedings of the Student Research Workshop Associated with RANLP 2021",
month = sep,
year = "2021",
address = "Online",
publisher = "INCOMA Ltd.",
url = "https://aclanthology.org/2021.ranlp-srw.11",
pages = "69--77",
abstract = "Accurately dealing with any type of ambiguity is a major task in Natural Language Processing, with great advances recently reached due to the development of context dependent language models and the use of word or sentence embeddings. In this context, our work aimed at determining how the popular language representation model BERT handle ambiguity of nouns in grammatical number and gender in different languages. We show that models trained on one specific language achieve better results for the disambiguation process than multilingual models. Also, ambiguity is generally better dealt with in grammatical number than it is in grammatical gender, reaching greater distance values from one to another in direct comparisons of individual senses. The overall results show also that the amount of data needed for training monolingual models as well as application should not be underestimated.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="janzso-2021-disambiguating-grammatical">
<titleInfo>
<title>Disambiguating Grammatical Number and Gender With BERT</title>
</titleInfo>
<name type="personal">
<namePart type="given">Annegret</namePart>
<namePart type="family">Janzso</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Student Research Workshop Associated with RANLP 2021</title>
</titleInfo>
<name type="personal">
<namePart type="given">Souhila</namePart>
<namePart type="family">Djabri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dinara</namePart>
<namePart type="family">Gimadi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tsvetomila</namePart>
<namePart type="family">Mihaylova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivelina</namePart>
<namePart type="family">Nikolova-Koleva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>INCOMA Ltd.</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Accurately dealing with any type of ambiguity is a major task in Natural Language Processing, with great advances achieved recently due to the development of context-dependent language models and the use of word or sentence embeddings. In this context, our work aimed at determining how the popular language representation model BERT handles ambiguity of nouns in grammatical number and gender in different languages. We show that models trained on one specific language achieve better results for the disambiguation process than multilingual models. Also, ambiguity is generally better dealt with in grammatical number than in grammatical gender, reaching greater distance values from one sense to another in direct comparisons of individual senses. The overall results also show that the amount of data needed for training monolingual models, as well as for their application, should not be underestimated.</abstract>
<identifier type="citekey">janzso-2021-disambiguating-grammatical</identifier>
<location>
<url>https://aclanthology.org/2021.ranlp-srw.11</url>
</location>
<part>
<date>2021-09</date>
<extent unit="page">
<start>69</start>
<end>77</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Disambiguating Grammatical Number and Gender With BERT
%A Janzso, Annegret
%Y Djabri, Souhila
%Y Gimadi, Dinara
%Y Mihaylova, Tsvetomila
%Y Nikolova-Koleva, Ivelina
%S Proceedings of the Student Research Workshop Associated with RANLP 2021
%D 2021
%8 September
%I INCOMA Ltd.
%C Online
%F janzso-2021-disambiguating-grammatical
%X Accurately dealing with any type of ambiguity is a major task in Natural Language Processing, with great advances achieved recently due to the development of context-dependent language models and the use of word or sentence embeddings. In this context, our work aimed at determining how the popular language representation model BERT handles ambiguity of nouns in grammatical number and gender in different languages. We show that models trained on one specific language achieve better results for the disambiguation process than multilingual models. Also, ambiguity is generally better dealt with in grammatical number than in grammatical gender, reaching greater distance values from one sense to another in direct comparisons of individual senses. The overall results also show that the amount of data needed for training monolingual models, as well as for their application, should not be underestimated.
%U https://aclanthology.org/2021.ranlp-srw.11
%P 69-77
Markdown (Informal)
[Disambiguating Grammatical Number and Gender With BERT](https://aclanthology.org/2021.ranlp-srw.11) (Janzso, RANLP 2021)
ACL
Annegret Janzso. 2021. Disambiguating Grammatical Number and Gender With BERT. In Proceedings of the Student Research Workshop Associated with RANLP 2021, pages 69–77, Online. INCOMA Ltd.