@inproceedings{helm-etal-2025-token,
title = "Token Weighting for Long-Range Language Modeling",
author = "Helm, Falko and
Daheim, Nico and
Gurevych, Iryna",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-naacl.79/",
doi = "10.18653/v1/2025.findings-naacl.79",
pages = "1440--1459",
ISBN = "979-8-89176-195-7",
abstract = "Many applications of large language models (LLMs) require long-context understanding, but models continue to struggle with such tasks. We hypothesize that conventional next-token prediction training could contribute to this, because each token is assigned equal weight. Yet, intuitively, the amount of context needed to predict the next token accurately varies greatly across different data. To reflect this, we propose various novel token-weighting schemes that assign different weights to each training token in the loss, thereby generalizing existing works. For this, we categorize token-weighting methods using a two-step framework which compares the confidences of a long-context and short-context model to score tokens. We evaluate all methods on multiple long-context understanding tasks and show that non-uniform loss weights are helpful to improve the long-context abilities of LLMs.Different short-context models can be used effectively for token scoring, including models that are much smaller than the long-context model that is trained.All in all, this work contributes to a better understanding of the trade-offs long-context language modeling faces and provides guidelines for model steering via loss-weighting based on empirical evidence. The code can be found on [Github](https://github.com/UKPLab/naacl2025-token-weighting)."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="helm-etal-2025-token">
<titleInfo>
<title>Token Weighting for Long-Range Language Modeling</title>
</titleInfo>
<name type="personal">
<namePart type="given">Falko</namePart>
<namePart type="family">Helm</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nico</namePart>
<namePart type="family">Daheim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Iryna</namePart>
<namePart type="family">Gurevych</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: NAACL 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Luis</namePart>
<namePart type="family">Chiruzzo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alan</namePart>
<namePart type="family">Ritter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lu</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Albuquerque, New Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-195-7</identifier>
</relatedItem>
<abstract>Many applications of large language models (LLMs) require long-context understanding, but models continue to struggle with such tasks. We hypothesize that conventional next-token prediction training could contribute to this, because each token is assigned equal weight. Yet, intuitively, the amount of context needed to predict the next token accurately varies greatly across different data. To reflect this, we propose various novel token-weighting schemes that assign different weights to each training token in the loss, thereby generalizing existing works. For this, we categorize token-weighting methods using a two-step framework which compares the confidences of a long-context and short-context model to score tokens. We evaluate all methods on multiple long-context understanding tasks and show that non-uniform loss weights are helpful to improve the long-context abilities of LLMs. Different short-context models can be used effectively for token scoring, including models that are much smaller than the long-context model that is trained. All in all, this work contributes to a better understanding of the trade-offs long-context language modeling faces and provides guidelines for model steering via loss-weighting based on empirical evidence. The code can be found on [GitHub](https://github.com/UKPLab/naacl2025-token-weighting).</abstract>
<identifier type="citekey">helm-etal-2025-token</identifier>
<identifier type="doi">10.18653/v1/2025.findings-naacl.79</identifier>
<location>
<url>https://aclanthology.org/2025.findings-naacl.79/</url>
</location>
<part>
<date>2025-04</date>
<extent unit="page">
<start>1440</start>
<end>1459</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Token Weighting for Long-Range Language Modeling
%A Helm, Falko
%A Daheim, Nico
%A Gurevych, Iryna
%Y Chiruzzo, Luis
%Y Ritter, Alan
%Y Wang, Lu
%S Findings of the Association for Computational Linguistics: NAACL 2025
%D 2025
%8 April
%I Association for Computational Linguistics
%C Albuquerque, New Mexico
%@ 979-8-89176-195-7
%F helm-etal-2025-token
%X Many applications of large language models (LLMs) require long-context understanding, but models continue to struggle with such tasks. We hypothesize that conventional next-token prediction training could contribute to this, because each token is assigned equal weight. Yet, intuitively, the amount of context needed to predict the next token accurately varies greatly across different data. To reflect this, we propose various novel token-weighting schemes that assign different weights to each training token in the loss, thereby generalizing existing works. For this, we categorize token-weighting methods using a two-step framework which compares the confidences of a long-context and short-context model to score tokens. We evaluate all methods on multiple long-context understanding tasks and show that non-uniform loss weights are helpful to improve the long-context abilities of LLMs. Different short-context models can be used effectively for token scoring, including models that are much smaller than the long-context model that is trained. All in all, this work contributes to a better understanding of the trade-offs long-context language modeling faces and provides guidelines for model steering via loss-weighting based on empirical evidence. The code can be found on [GitHub](https://github.com/UKPLab/naacl2025-token-weighting).
%R 10.18653/v1/2025.findings-naacl.79
%U https://aclanthology.org/2025.findings-naacl.79/
%U https://doi.org/10.18653/v1/2025.findings-naacl.79
%P 1440-1459
Markdown (Informal)

[Token Weighting for Long-Range Language Modeling](https://aclanthology.org/2025.findings-naacl.79/) (Helm et al., Findings 2025)

ACL

Falko Helm, Nico Daheim, and Iryna Gurevych. 2025. Token Weighting for Long-Range Language Modeling. In Findings of the Association for Computational Linguistics: NAACL 2025, pages 1440–1459, Albuquerque, New Mexico. Association for Computational Linguistics.
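The abstract describes the core idea only at a high level: score each training token by comparing the confidence of a long-context and a short-context model, then use those scores as per-token weights in the language-modeling loss. The sketch below is an illustrative Python/PyTorch rendering of that idea, not the authors' implementation; every function name, the sigmoid-of-log-probability-difference scoring rule, and the weight normalization are assumptions chosen for clarity. The paper's actual weighting schemes are in the linked repository.

```python
# Minimal sketch (assumptions, not the paper's exact method) of token-weighted
# language-model training: tokens whose probability improves with long context
# receive larger loss weights.

import torch
import torch.nn.functional as F


@torch.no_grad()
def token_scores(long_logits: torch.Tensor,
                 short_logits: torch.Tensor,
                 targets: torch.Tensor) -> torch.Tensor:
    """Score tokens by how much extra confidence the long context provides.

    long_logits / short_logits: (batch, seq_len, vocab) logits from a
    long-context and a short-context scoring model on the same targets.
    targets: (batch, seq_len) token ids.
    Returns scores in (0, 1); larger where long context helps more.
    """
    long_logp = torch.log_softmax(long_logits, dim=-1)
    short_logp = torch.log_softmax(short_logits, dim=-1)
    lp_long = long_logp.gather(-1, targets.unsqueeze(-1)).squeeze(-1)
    lp_short = short_logp.gather(-1, targets.unsqueeze(-1)).squeeze(-1)
    # Illustrative scoring rule: squash the log-probability gap into (0, 1).
    return torch.sigmoid(lp_long - lp_short)


def weighted_lm_loss(logits: torch.Tensor,
                     targets: torch.Tensor,
                     weights: torch.Tensor) -> torch.Tensor:
    """Per-token weighted cross-entropy, normalized by the total weight."""
    # cross_entropy expects (batch, vocab, seq_len) for sequence inputs.
    ce = F.cross_entropy(logits.transpose(1, 2), targets, reduction="none")
    return (weights * ce).sum() / weights.sum().clamp_min(1e-8)
```

As the abstract notes, the short-context scoring model can be much smaller than the long-context model being trained, so the extra forward passes needed to compute the weights can be comparatively cheap.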