@inproceedings{gessler-zeldes-2022-microbert,
title = "{M}icro{BERT}: Effective Training of Low-resource Monolingual {BERT}s through Parameter Reduction and Multitask Learning",
author = "Gessler, Luke and
Zeldes, Amir",
editor = {Ataman, Duygu and
Gonen, Hila and
Ruder, Sebastian and
Firat, Orhan and
G{\"u}l Sahin, G{\"o}zde and
Mirzakhalov, Jamshidbek},
booktitle = "Proceedings of the 2nd Workshop on Multi-lingual Representation Learning (MRL)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.mrl-1.9/",
doi = "10.18653/v1/2022.mrl-1.9",
pages = "86--99",
abstract = "BERT-style contextualized word embedding models are critical for good performance in most NLP tasks, but they are data-hungry and therefore difficult to train for low-resource languages. In this work, we investigate whether a combination of greatly reduced model size and two linguistically rich auxiliary pretraining tasks (part-of-speech tagging and dependency parsing) can help produce better BERTs in a low-resource setting. Results from 7 diverse languages indicate that our model, MicroBERT, is able to produce marked improvements in downstream task evaluations, including gains up to 18{\%} for parser LAS and 11{\%} for NER F1 compared to an mBERT baseline, and we achieve these results with less than 1{\%} of the parameter count of a multilingual BERT base{--}sized model. We conclude that training very small BERTs and leveraging any available labeled data for multitask learning during pretraining can produce models which outperform both their multilingual counterparts and traditional fixed embeddings for low-resource languages."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gessler-zeldes-2022-microbert">
<titleInfo>
<title>MicroBERT: Effective Training of Low-resource Monolingual BERTs through Parameter Reduction and Multitask Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Luke</namePart>
<namePart type="family">Gessler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amir</namePart>
<namePart type="family">Zeldes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2nd Workshop on Multi-lingual Representation Learning (MRL)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Duygu</namePart>
<namePart type="family">Ataman</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hila</namePart>
<namePart type="family">Gonen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sebastian</namePart>
<namePart type="family">Ruder</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Orhan</namePart>
<namePart type="family">Firat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gözde</namePart>
<namePart type="family">Gül Sahin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jamshidbek</namePart>
<namePart type="family">Mirzakhalov</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>BERT-style contextualized word embedding models are critical for good performance in most NLP tasks, but they are data-hungry and therefore difficult to train for low-resource languages. In this work, we investigate whether a combination of greatly reduced model size and two linguistically rich auxiliary pretraining tasks (part-of-speech tagging and dependency parsing) can help produce better BERTs in a low-resource setting. Results from 7 diverse languages indicate that our model, MicroBERT, is able to produce marked improvements in downstream task evaluations, including gains up to 18% for parser LAS and 11% for NER F1 compared to an mBERT baseline, and we achieve these results with less than 1% of the parameter count of a multilingual BERT base–sized model. We conclude that training very small BERTs and leveraging any available labeled data for multitask learning during pretraining can produce models which outperform both their multilingual counterparts and traditional fixed embeddings for low-resource languages.</abstract>
<identifier type="citekey">gessler-zeldes-2022-microbert</identifier>
<identifier type="doi">10.18653/v1/2022.mrl-1.9</identifier>
<location>
<url>https://aclanthology.org/2022.mrl-1.9/</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>86</start>
<end>99</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T MicroBERT: Effective Training of Low-resource Monolingual BERTs through Parameter Reduction and Multitask Learning
%A Gessler, Luke
%A Zeldes, Amir
%Y Ataman, Duygu
%Y Gonen, Hila
%Y Ruder, Sebastian
%Y Firat, Orhan
%Y Sahin, Gözde Gül
%Y Mirzakhalov, Jamshidbek
%S Proceedings of the 2nd Workshop on Multi-lingual Representation Learning (MRL)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F gessler-zeldes-2022-microbert
%X BERT-style contextualized word embedding models are critical for good performance in most NLP tasks, but they are data-hungry and therefore difficult to train for low-resource languages. In this work, we investigate whether a combination of greatly reduced model size and two linguistically rich auxiliary pretraining tasks (part-of-speech tagging and dependency parsing) can help produce better BERTs in a low-resource setting. Results from 7 diverse languages indicate that our model, MicroBERT, is able to produce marked improvements in downstream task evaluations, including gains up to 18% for parser LAS and 11% for NER F1 compared to an mBERT baseline, and we achieve these results with less than 1% of the parameter count of a multilingual BERT base–sized model. We conclude that training very small BERTs and leveraging any available labeled data for multitask learning during pretraining can produce models which outperform both their multilingual counterparts and traditional fixed embeddings for low-resource languages.
%R 10.18653/v1/2022.mrl-1.9
%U https://aclanthology.org/2022.mrl-1.9/
%U https://doi.org/10.18653/v1/2022.mrl-1.9
%P 86-99
Markdown (Informal)
[MicroBERT: Effective Training of Low-resource Monolingual BERTs through Parameter Reduction and Multitask Learning](https://aclanthology.org/2022.mrl-1.9/) (Gessler & Zeldes, MRL 2022)
ACL
Luke Gessler and Amir Zeldes. 2022. [MicroBERT: Effective Training of Low-resource Monolingual BERTs through Parameter Reduction and Multitask Learning](https://aclanthology.org/2022.mrl-1.9/). In *Proceedings of the 2nd Workshop on Multi-lingual Representation Learning (MRL)*, pages 86–99, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.
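The abstract describes MicroBERT's recipe only at a high level: shrink the encoder drastically and add linguistically supervised auxiliary pretraining objectives (POS tagging and dependency parsing) alongside masked language modeling. The sketch below is not the authors' released implementation; it is a minimal PyTorch illustration of that multitask idea with only an MLM head plus a POS-tagging head (the dependency-parsing objective is omitted), and every size, class name, and hyperparameter in it is an illustrative assumption rather than the paper's actual configuration.

```python
# Minimal sketch (not the authors' code) of multitask pretraining for a very small
# BERT-style encoder: masked language modeling plus an auxiliary POS-tagging loss.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyMultitaskEncoder(nn.Module):
    """Tiny Transformer encoder with an MLM head and an auxiliary UPOS-tagging head."""
    def __init__(self, vocab_size=8000, n_pos_tags=17, d_model=128,
                 n_layers=3, n_heads=4, max_len=512):
        super().__init__()
        self.tok_emb = nn.Embedding(vocab_size, d_model)
        self.pos_emb = nn.Embedding(max_len, d_model)  # learned position embeddings
        layer = nn.TransformerEncoderLayer(
            d_model, n_heads, dim_feedforward=4 * d_model, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=n_layers)
        self.mlm_head = nn.Linear(d_model, vocab_size)   # masked-token prediction
        self.tag_head = nn.Linear(d_model, n_pos_tags)   # auxiliary POS tagging

    def forward(self, token_ids):
        positions = torch.arange(token_ids.size(1), device=token_ids.device)
        hidden = self.encoder(self.tok_emb(token_ids) + self.pos_emb(positions))
        return self.mlm_head(hidden), self.tag_head(hidden)

model = TinyMultitaskEncoder()
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)

# Dummy batch standing in for raw text (for MLM) and treebank-tagged text (for POS).
# Real MLM would also replace the selected input tokens with a [MASK] id; that step
# is omitted here for brevity.
tokens = torch.randint(0, 8000, (2, 32))
mlm_labels = tokens.clone()
mlm_labels[torch.rand(tokens.shape) > 0.15] = -100    # score only ~15% of positions
pos_labels = torch.randint(0, 17, (2, 32))

mlm_logits, pos_logits = model(tokens)
loss = (F.cross_entropy(mlm_logits.reshape(-1, 8000), mlm_labels.reshape(-1),
                        ignore_index=-100)
        + F.cross_entropy(pos_logits.reshape(-1, 17), pos_labels.reshape(-1)))
loss.backward()
optimizer.step()
print(float(loss))
```

In the low-resource setting the abstract describes, the POS batches would come from whatever labeled treebank data is available for the target language; the two losses are simply summed here, and the paper's exact task weighting and second auxiliary task (dependency parsing) may differ.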