@inproceedings{hamdan-etal-2025-r,
title = "{R}-{BPE}: Improving {BPE}-Tokenizers with Token Reuse",
author = "Hamdan, Nancy and
Al Mraikhat, Osama Rakan and
Zaraket, Fadi",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-main.1169/",
pages = "22951--22959",
ISBN = "979-8-89176-332-6",
abstract = "This paper presents R-BPE, a lightweight framework for adapting existing Byte-Pair Encoding (BPE) tokenizers to better support a specified target language. It reuses tokens from user-excluded languages and creates ID-based maps to resolve the new tokens of the chosen language. We evaluate R-BPE on Arabic as a target language. R-BPE reduced subword fertility by an average of 24.4{\%} across the LLaMA 3.1 8B, Command R 35B, and Qwen 3 8B models. Applied to LLaMA 3.1 8B in continued pretraining mode, R-BPE yields a 7.33{\%} reduction in training time. On the ArabicMMLU benchmark, the resulting model improved by 5.09 points on five in-domain topics and matched the original model{'}s overall performance. It also preserved performance on EnglishMMLU. R-BPE effectively leverages existing models' tokenizers, embedding layers, and performance to better support target languages without incurring model size changes. We release an R-BPE implementation that is compatible with HuggingFace interfaces and thereby readily applicable to a wide range of existing models at https://acr.ps/1L9GPmL."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hamdan-etal-2025-r">
<titleInfo>
<title>R-BPE: Improving BPE-Tokenizers with Token Reuse</title>
</titleInfo>
<name type="personal">
<namePart type="given">Nancy</namePart>
<namePart type="family">Hamdan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Osama</namePart>
<namePart type="given">Rakan</namePart>
<namePart type="family">Al Mraikhat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fadi</namePart>
<namePart type="family">Zaraket</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-332-6</identifier>
</relatedItem>
<abstract>This paper presents R-BPE, a lightweight framework for adapting existing Byte-Pair Encoding (BPE) tokenizers to better support a specified target language. It reuses tokens from user-excluded languages and creates ID-based maps to resolve the new tokens of the chosen language. We evaluate R-BPE on Arabic as a target language. R-BPE reduced subword fertility by an average of 24.4% across the LLaMA 3.1 8B, Command R 35B, and Qwen 3 8B models. Applied to LLaMA 3.1 8B in continued pretraining mode, R-BPE yields a 7.33% reduction in training time. On the ArabicMMLU benchmark, the resulting model improved by 5.09 points on five in-domain topics and matched the original model’s overall performance. It also preserved performance on EnglishMMLU. R-BPE effectively leverages existing models’ tokenizers, embedding layers, and performance to better support target languages without incurring model size changes. We release an R-BPE implementation that is compatible with HuggingFace interfaces and thereby readily applicable to a wide range of existing models at https://acr.ps/1L9GPmL.</abstract>
<identifier type="citekey">hamdan-etal-2025-r</identifier>
<location>
<url>https://aclanthology.org/2025.emnlp-main.1169/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>22951</start>
<end>22959</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T R-BPE: Improving BPE-Tokenizers with Token Reuse
%A Hamdan, Nancy
%A Al Mraikhat, Osama Rakan
%A Zaraket, Fadi
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-332-6
%F hamdan-etal-2025-r
%X This paper presents R-BPE, a lightweight framework for adapting existing Byte-Pair Encoding (BPE) tokenizers to better support a specified target language. It reuses tokens from user-excluded languages and creates ID-based maps to resolve the new tokens of the chosen language. We evaluate R-BPE on Arabic as a target language. R-BPE reduced subword fertility by an average of 24.4% across the LLaMA 3.1 8B, Command R 35B, and Qwen 3 8B models. Applied to LLaMA 3.1 8B in continued pretraining mode, R-BPE yields a 7.33% reduction in training time. On the ArabicMMLU benchmark, the resulting model improved by 5.09 points on five in-domain topics and matched the original model’s overall performance. It also preserved performance on EnglishMMLU. R-BPE effectively leverages existing models’ tokenizers, embedding layers, and performance to better support target languages without incurring model size changes. We release an R-BPE implementation that is compatible with HuggingFace interfaces and thereby readily applicable to a wide range of existing models at https://acr.ps/1L9GPmL.
%U https://aclanthology.org/2025.emnlp-main.1169/
%P 22951-22959
Markdown (Informal)
[R-BPE: Improving BPE-Tokenizers with Token Reuse](https://aclanthology.org/2025.emnlp-main.1169/) (Hamdan et al., EMNLP 2025)
ACL
- Nancy Hamdan, Osama Rakan Al Mraikhat, and Fadi Zaraket. 2025. R-BPE: Improving BPE-Tokenizers with Token Reuse. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing, pages 22951–22959, Suzhou, China. Association for Computational Linguistics.