BibTeX

@inproceedings{goliakova-langlois-2024-bert,
    title = "What do {BERT} Word Embeddings Learn about the {F}rench Language?",
    author = "Goliakova, Ekaterina and
      Langlois, David",
    booktitle = "Proceedings of the Sixth International Conference on Computational Linguistics in Bulgaria (CLIB 2024)",
    month = sep,
    year = "2024",
    address = "Sofia, Bulgaria",
    publisher = "Department of Computational Linguistics, Institute for Bulgarian Language, Bulgarian Academy of Sciences",
    url = "https://aclanthology.org/2024.clib-1.2",
    pages = "14--32",
    abstract = "Pre-trained word embeddings (for example, BERT-like) have been successfully used in a variety of downstream tasks. However, do all embeddings, obtained from the models of the same architecture, encode information in the same way? Does the size of the model correlate to the quality of the information encoding? In this paper, we will attempt to dissect the dimensions of several BERT-like models that were trained on the French language to find where grammatical information (gender, plurality, part of speech) and semantic features might be encoded. In addition to this, we propose a framework for comparing the quality of encoding in different models.",
}

MODS XML

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="goliakova-langlois-2024-bert">
  <titleInfo>
    <title>What do BERT Word Embeddings Learn about the French Language?</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Ekaterina</namePart>
    <namePart type="family">Goliakova</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">David</namePart>
    <namePart type="family">Langlois</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2024-09</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <relatedItem type="host">
    <titleInfo>
      <title>Proceedings of the Sixth International Conference on Computational Linguistics in Bulgaria (CLIB 2024)</title>
    </titleInfo>
    <originInfo>
      <publisher>Department of Computational Linguistics, Institute for Bulgarian Language, Bulgarian Academy of Sciences</publisher>
      <place>
        <placeTerm type="text">Sofia, Bulgaria</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">conference publication</genre>
  </relatedItem>
  <abstract>Pre-trained word embeddings (for example, BERT-like) have been successfully used in a variety of downstream tasks. However, do all embeddings, obtained from the models of the same architecture, encode information in the same way? Does the size of the model correlate to the quality of the information encoding? In this paper, we will attempt to dissect the dimensions of several BERT-like models that were trained on the French language to find where grammatical information (gender, plurality, part of speech) and semantic features might be encoded. In addition to this, we propose a framework for comparing the quality of encoding in different models.</abstract>
  <identifier type="citekey">goliakova-langlois-2024-bert</identifier>
  <location>
    <url>https://aclanthology.org/2024.clib-1.2</url>
  </location>
  <part>
    <date>2024-09</date>
    <extent unit="page">
      <start>14</start>
      <end>32</end>
    </extent>
  </part>
</mods>
</modsCollection>

Endnote

%0 Conference Proceedings
%T What do BERT Word Embeddings Learn about the French Language?
%A Goliakova, Ekaterina
%A Langlois, David
%S Proceedings of the Sixth International Conference on Computational Linguistics in Bulgaria (CLIB 2024)
%D 2024
%8 September
%I Department of Computational Linguistics, Institute for Bulgarian Language, Bulgarian Academy of Sciences
%C Sofia, Bulgaria
%F goliakova-langlois-2024-bert
%X Pre-trained word embeddings (for example, BERT-like) have been successfully used in a variety of downstream tasks. However, do all embeddings, obtained from the models of the same architecture, encode information in the same way? Does the size of the model correlate to the quality of the information encoding? In this paper, we will attempt to dissect the dimensions of several BERT-like models that were trained on the French language to find where grammatical information (gender, plurality, part of speech) and semantic features might be encoded. In addition to this, we propose a framework for comparing the quality of encoding in different models.
%U https://aclanthology.org/2024.clib-1.2
%P 14-32

Markdown (Informal)

[What do BERT Word Embeddings Learn about the French Language?](https://aclanthology.org/2024.clib-1.2) (Goliakova & Langlois, CLIB 2024)

ACL

Ekaterina Goliakova and David Langlois. 2024. What do BERT Word Embeddings Learn about the French Language? In Proceedings of the Sixth International Conference on Computational Linguistics in Bulgaria (CLIB 2024), pages 14–32, Sofia, Bulgaria. Department of Computational Linguistics, Institute for Bulgarian Language, Bulgarian Academy of Sciences.