@inproceedings{williams-aletras-2025-vocabulary,
    title = "Vocabulary-level Memory Efficiency for Language Model Fine-tuning",
    author = "Williams, Miles and
      Aletras, Nikolaos",
    editor = "Adlakha, Vaibhav and
      Chronopoulou, Alexandra and
      Li, Xiang Lorraine and
      Majumder, Bodhisattwa Prasad and
      Shi, Freda and
      Vernikos, Giorgos",
    booktitle = "Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025)",
    month = may,
    year = "2025",
    address = "Albuquerque, NM",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.repl4nlp-1.14/",
    doi = "10.18653/v1/2025.repl4nlp-1.14",
    pages = "185--196",
    ISBN = "979-8-89176-245-9",
    abstract = "The extensive memory footprint of language model (LM) fine-tuning poses a challenge for both researchers and practitioners. LMs use an embedding matrix to represent extensive vocabularies, forming a substantial proportion of the model parameters. While previous work towards memory-efficient fine-tuning has focused on minimizing the number of trainable parameters, reducing the memory footprint of the embedding matrix has yet to be explored. We first demonstrate that a significant proportion of the vocabulary remains unused during fine-tuning. We then propose a simple yet effective approach that leverages this finding to minimize memory usage. We show that our approach provides substantial reductions in memory usage across a wide range of models and tasks. Notably, our approach does not impact downstream task performance, while allowing more efficient use of computational resources."
}

<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="williams-aletras-2025-vocabulary">
    <titleInfo>
      <title>Vocabulary-level Memory Efficiency for Language Model Fine-tuning</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Miles</namePart>
      <namePart type="family">Williams</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Nikolaos</namePart>
      <namePart type="family">Aletras</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2025-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Vaibhav</namePart>
        <namePart type="family">Adlakha</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alexandra</namePart>
        <namePart type="family">Chronopoulou</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Xiang</namePart>
        <namePart type="given">Lorraine</namePart>
        <namePart type="family">Li</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Bodhisattwa</namePart>
        <namePart type="given">Prasad</namePart>
        <namePart type="family">Majumder</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Freda</namePart>
        <namePart type="family">Shi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Giorgos</namePart>
        <namePart type="family">Vernikos</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Albuquerque, NM</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
      <identifier type="isbn">979-8-89176-245-9</identifier>
    </relatedItem>
    <abstract>The extensive memory footprint of language model (LM) fine-tuning poses a challenge for both researchers and practitioners. LMs use an embedding matrix to represent extensive vocabularies, forming a substantial proportion of the model parameters. While previous work towards memory-efficient fine-tuning has focused on minimizing the number of trainable parameters, reducing the memory footprint of the embedding matrix has yet to be explored. We first demonstrate that a significant proportion of the vocabulary remains unused during fine-tuning. We then propose a simple yet effective approach that leverages this finding to minimize memory usage. We show that our approach provides substantial reductions in memory usage across a wide range of models and tasks. Notably, our approach does not impact downstream task performance, while allowing more efficient use of computational resources.</abstract>
    <identifier type="citekey">williams-aletras-2025-vocabulary</identifier>
    <identifier type="doi">10.18653/v1/2025.repl4nlp-1.14</identifier>
    <location>
      <url>https://aclanthology.org/2025.repl4nlp-1.14/</url>
    </location>
    <part>
      <date>2025-05</date>
      <extent unit="page">
        <start>185</start>
        <end>196</end>
      </extent>
    </part>
  </mods>
</modsCollection>

%0 Conference Proceedings
%T Vocabulary-level Memory Efficiency for Language Model Fine-tuning
%A Williams, Miles
%A Aletras, Nikolaos
%Y Adlakha, Vaibhav
%Y Chronopoulou, Alexandra
%Y Li, Xiang Lorraine
%Y Majumder, Bodhisattwa Prasad
%Y Shi, Freda
%Y Vernikos, Giorgos
%S Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025)
%D 2025
%8 May
%I Association for Computational Linguistics
%C Albuquerque, NM
%@ 979-8-89176-245-9
%F williams-aletras-2025-vocabulary
%X The extensive memory footprint of language model (LM) fine-tuning poses a challenge for both researchers and practitioners. LMs use an embedding matrix to represent extensive vocabularies, forming a substantial proportion of the model parameters. While previous work towards memory-efficient fine-tuning has focused on minimizing the number of trainable parameters, reducing the memory footprint of the embedding matrix has yet to be explored. We first demonstrate that a significant proportion of the vocabulary remains unused during fine-tuning. We then propose a simple yet effective approach that leverages this finding to minimize memory usage. We show that our approach provides substantial reductions in memory usage across a wide range of models and tasks. Notably, our approach does not impact downstream task performance, while allowing more efficient use of computational resources.
%R 10.18653/v1/2025.repl4nlp-1.14
%U https://aclanthology.org/2025.repl4nlp-1.14/
%U https://doi.org/10.18653/v1/2025.repl4nlp-1.14
%P 185-196

Markdown (Informal)
[Vocabulary-level Memory Efficiency for Language Model Fine-tuning](https://aclanthology.org/2025.repl4nlp-1.14/) (Williams & Aletras, RepL4NLP 2025)

ACL
Miles Williams and Nikolaos Aletras. 2025. Vocabulary-level Memory Efficiency for Language Model Fine-tuning. In Proceedings of the 10th Workshop on Representation Learning for NLP (RepL4NLP-2025), pages 185–196, Albuquerque, NM. Association for Computational Linguistics.
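
The abstract describes the core idea only at a high level: many vocabulary entries never occur in the fine-tuning data, so their embedding rows need not be stored or updated. Below is a minimal, hypothetical PyTorch sketch of that idea, not the authors' implementation; the function and variable names (`trim_embedding`, `token_id_sequences`) are illustrative assumptions.

```python
# Hypothetical sketch: keep only the embedding rows for token IDs that
# actually appear in the fine-tuning data. Not the paper's code.
import torch
import torch.nn as nn


def trim_embedding(embedding: nn.Embedding, token_id_sequences):
    """Return a reduced embedding plus an old-ID -> new-ID mapping."""
    # Collect the token IDs that occur anywhere in the fine-tuning data.
    used_ids = sorted({tid for seq in token_id_sequences for tid in seq})

    # Map each original vocabulary ID to its row in the reduced matrix.
    old_to_new = {old: new for new, old in enumerate(used_ids)}

    # Slice the full weight matrix down to only the used rows.
    reduced = nn.Embedding(len(used_ids), embedding.embedding_dim)
    with torch.no_grad():
        reduced.weight.copy_(embedding.weight[used_ids])
    return reduced, old_to_new


# Usage: a toy 50k-entry vocabulary where the data uses only 4 tokens.
full = nn.Embedding(50_000, 768)
data = [[5, 9, 9, 42], [7, 5]]
small, remap = trim_embedding(full, data)
print(small.weight.shape)  # torch.Size([4, 768])

# Inputs must be remapped before being fed to the trimmed model.
remapped = [[remap[t] for t in seq] for seq in data]
```

Under this sketch, both the embedding parameters and their optimizer state shrink from vocabulary size to used-vocabulary size; the full matrix can be restored after fine-tuning by writing the trained rows back via the same mapping.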