@inproceedings{dorkin-etal-2024-erzya,
title = "On {E}rzya and {M}oksha Corpora and Analyzer Development, {ERME}-{PSLA} 1950s",
author = "Dorkin, Aleksei and
Purason, Taido and
Sirts, Kairit",
editor = {H{\"a}m{\"a}l{\"a}inen, Mika and
Pirinen, Flammie and
Macias, Melany and
Crespo Avila, Mario},
booktitle = "Proceedings of the 9th International Workshop on Computational Linguistics for Uralic Languages",
month = nov,
year = "2024",
address = "Helsinki, Finland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.iwclul-1.13",
pages = "104--108",
abstract = "Adapting multilingual language models to specific languages can enhance both their efficiency and performance. In this study, we explore how modifying the vocabulary of a multilingual encoder model to better suit the Estonian language affects its downstream performance on the Named Entity Recognition (NER) task. The motivations for adjusting the vocabulary are twofold: practical benefits affecting the computational cost, such as reducing the input sequence length and the model size, and performance enhancements by tailoring the vocabulary to the particular language. We evaluate the effectiveness of two vocabulary adaptation approaches{---}retraining the tokenizer and pruning unused tokens{---}and assess their impact on the model{'}s performance, particularly after continual training. While retraining the tokenizer degraded the performance of the NER task, suggesting that longer embedding tuning might be needed, we observed no negative effects on pruning.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="dorkin-etal-2024-erzya">
<titleInfo>
<title>On Erzya and Moksha Corpora and Analyzer Development, ERME-PSLA 1950s</title>
</titleInfo>
<name type="personal">
<namePart type="given">Aleksei</namePart>
<namePart type="family">Dorkin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Taido</namePart>
<namePart type="family">Purason</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kairit</namePart>
<namePart type="family">Sirts</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 9th International Workshop on Computational Linguistics for Uralic Languages</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mika</namePart>
<namePart type="family">Hämäläinen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Flammie</namePart>
<namePart type="family">Pirinen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Melany</namePart>
<namePart type="family">Macias</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mario</namePart>
<namePart type="family">Crespo Avila</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Helsinki, Finland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Adapting multilingual language models to specific languages can enhance both their efficiency and performance. In this study, we explore how modifying the vocabulary of a multilingual encoder model to better suit the Estonian language affects its downstream performance on the Named Entity Recognition (NER) task. The motivations for adjusting the vocabulary are twofold: practical benefits affecting the computational cost, such as reducing the input sequence length and the model size, and performance enhancements by tailoring the vocabulary to the particular language. We evaluate the effectiveness of two vocabulary adaptation approaches—retraining the tokenizer and pruning unused tokens—and assess their impact on the model’s performance, particularly after continual training. While retraining the tokenizer degraded the performance of the NER task, suggesting that longer embedding tuning might be needed, we observed no negative effects on pruning.</abstract>
<identifier type="citekey">dorkin-etal-2024-erzya</identifier>
<location>
<url>https://aclanthology.org/2024.iwclul-1.13</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>104</start>
<end>108</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T On Erzya and Moksha Corpora and Analyzer Development, ERME-PSLA 1950s
%A Dorkin, Aleksei
%A Purason, Taido
%A Sirts, Kairit
%Y Hämäläinen, Mika
%Y Pirinen, Flammie
%Y Macias, Melany
%Y Crespo Avila, Mario
%S Proceedings of the 9th International Workshop on Computational Linguistics for Uralic Languages
%D 2024
%8 November
%I Association for Computational Linguistics
%C Helsinki, Finland
%F dorkin-etal-2024-erzya
%X Adapting multilingual language models to specific languages can enhance both their efficiency and performance. In this study, we explore how modifying the vocabulary of a multilingual encoder model to better suit the Estonian language affects its downstream performance on the Named Entity Recognition (NER) task. The motivations for adjusting the vocabulary are twofold: practical benefits affecting the computational cost, such as reducing the input sequence length and the model size, and performance enhancements by tailoring the vocabulary to the particular language. We evaluate the effectiveness of two vocabulary adaptation approaches—retraining the tokenizer and pruning unused tokens—and assess their impact on the model’s performance, particularly after continual training. While retraining the tokenizer degraded the performance of the NER task, suggesting that longer embedding tuning might be needed, we observed no negative effects on pruning.
%U https://aclanthology.org/2024.iwclul-1.13
%P 104-108
Markdown (Informal)
[On Erzya and Moksha Corpora and Analyzer Development, ERME-PSLA 1950s](https://aclanthology.org/2024.iwclul-1.13) (Dorkin et al., IWCLUL 2024)
ACL
Aleksei Dorkin, Taido Purason, and Kairit Sirts. 2024. On Erzya and Moksha Corpora and Analyzer Development, ERME-PSLA 1950s. In Proceedings of the 9th International Workshop on Computational Linguistics for Uralic Languages, pages 104–108, Helsinki, Finland. Association for Computational Linguistics.
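The abstract above mentions pruning unused tokens from a multilingual encoder's vocabulary as one of two adaptation approaches (the other being tokenizer retraining). As a rough illustration only — the paper does not publish this code, and the model name, toy corpus, and id-remapping step below are assumptions — such pruning could look roughly like this with PyTorch and Hugging Face Transformers:

```python
# Illustrative sketch of vocabulary pruning; not the authors' implementation.
import torch
from transformers import AutoModel, AutoTokenizer

model_name = "xlm-roberta-base"  # assumed multilingual encoder
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# 1. Collect the token ids actually used in the target-language corpus.
corpus = ["Näide eestikeelsest lausest.", "Teine lause korpusest."]  # toy corpus
used_ids = set()
for text in corpus:
    used_ids.update(tokenizer(text, add_special_tokens=True)["input_ids"])
used_ids.update(tokenizer.all_special_ids)  # always keep special tokens
keep = sorted(used_ids)

# 2. Slice the input embedding matrix down to the kept rows.
old_emb = model.get_input_embeddings().weight.data
new_emb = torch.nn.Embedding(len(keep), old_emb.size(1))
new_emb.weight.data = old_emb[keep].clone()
model.set_input_embeddings(new_emb)
model.config.vocab_size = len(keep)

# 3. Token ids from the original tokenizer must be remapped to the smaller
#    id space before any continued training or inference.
old_to_new = {old: new for new, old in enumerate(keep)}
```

In practice the used-token statistics would be gathered over the full training corpus rather than a toy list, and the tokenizer's own vocabulary file would also need to be reduced so that it emits the remapped ids directly.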