BibTeX
@inproceedings{takase-etal-2025-large,
title = "Large Vocabulary Size Improves Large Language Models",
author = "Takase, Sho and
Ri, Ryokan and
Kiyono, Shun and
Kato, Takuya",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-acl.57/",
doi = "10.18653/v1/2025.findings-acl.57",
pages = "1015--1026",
ISBN = "979-8-89176-256-5",
abstract = "This paper empirically investigates the relationship between subword vocabulary size and the performance of large language models (LLMs) to provide insights on how to define the vocabulary size. Experimental results show that larger vocabulary sizes lead to better performance in LLMs. Moreover, we consider a continual training scenario where a pre-trained language model is trained on a different target language. We introduce a simple method to use a new vocabulary instead of the pre-defined one. We show that using the new vocabulary outperforms the model with the vocabulary used in pre-training."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="takase-etal-2025-large">
<titleInfo>
<title>Large Vocabulary Size Improves Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Sho</namePart>
<namePart type="family">Takase</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ryokan</namePart>
<namePart type="family">Ri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shun</namePart>
<namePart type="family">Kiyono</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Takuya</namePart>
<namePart type="family">Kato</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: ACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-256-5</identifier>
</relatedItem>
<abstract>This paper empirically investigates the relationship between subword vocabulary size and the performance of large language models (LLMs) to provide insights on how to define the vocabulary size. Experimental results show that larger vocabulary sizes lead to better performance in LLMs. Moreover, we consider a continual training scenario where a pre-trained language model is trained on a different target language. We introduce a simple method to use a new vocabulary instead of the pre-defined one. We show that using the new vocabulary outperforms the model with the vocabulary used in pre-training.</abstract>
<identifier type="citekey">takase-etal-2025-large</identifier>
<identifier type="doi">10.18653/v1/2025.findings-acl.57</identifier>
<location>
<url>https://aclanthology.org/2025.findings-acl.57/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>1015</start>
<end>1026</end>
</extent>
</part>
</mods>
</modsCollection>

Endnote
%0 Conference Proceedings
%T Large Vocabulary Size Improves Large Language Models
%A Takase, Sho
%A Ri, Ryokan
%A Kiyono, Shun
%A Kato, Takuya
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Findings of the Association for Computational Linguistics: ACL 2025
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-256-5
%F takase-etal-2025-large
%X This paper empirically investigates the relationship between subword vocabulary size and the performance of large language models (LLMs) to provide insights on how to define the vocabulary size. Experimental results show that larger vocabulary sizes lead to better performance in LLMs. Moreover, we consider a continual training scenario where a pre-trained language model is trained on a different target language. We introduce a simple method to use a new vocabulary instead of the pre-defined one. We show that using the new vocabulary outperforms the model with the vocabulary used in pre-training.
%R 10.18653/v1/2025.findings-acl.57
%U https://aclanthology.org/2025.findings-acl.57/
%U https://doi.org/10.18653/v1/2025.findings-acl.57
%P 1015-1026

Markdown (Informal)
[Large Vocabulary Size Improves Large Language Models](https://aclanthology.org/2025.findings-acl.57/) (Takase et al., Findings 2025)

ACL
Sho Takase, Ryokan Ri, Shun Kiyono, and Takuya Kato. 2025. Large Vocabulary Size Improves Large Language Models. In Findings of the Association for Computational Linguistics: ACL 2025, pages 1015–1026, Vienna, Austria. Association for Computational Linguistics.