@inproceedings{nishida-etal-2025-instability,
title = "Instability in Downstream Task Performance During {LLM} Pretraining",
author = "Nishida, Yuto and
Isonuma, Masaru and
Oda, Yusuke",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.1246/",
pages = "22883--22895",
ISBN = "979-8-89176-335-7",
abstract = "When training large language models (LLMs), it is common practice to track downstream task performance throughout the training process and select the checkpoint with the highest validation score.However, downstream metrics often exhibit substantial fluctuations, making it difficult to identify the checkpoint that truly represents the best-performing model.In this study, we empirically analyze the stability of downstream task performance in an LLM trained on diverse web-scale corpora.We find that task scores frequently fluctuate throughout training, both at the aggregate and example levels.To address this instability, we investigate two post-hoc checkpoint integration methods: checkpoint averaging and ensemble, motivated by the hypothesis that aggregating neighboring checkpoints can reduce performance volatility.We demonstrate both empirically and theoretically that these methods improve downstream performance stability without requiring any changes to the training procedure."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nishida-etal-2025-instability">
<titleInfo>
<title>Instability in Downstream Task Performance During LLM Pretraining</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yuto</namePart>
<namePart type="family">Nishida</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Masaru</namePart>
<namePart type="family">Isonuma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yusuke</namePart>
<namePart type="family">Oda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>When training large language models (LLMs), it is common practice to track downstream task performance throughout the training process and select the checkpoint with the highest validation score. However, downstream metrics often exhibit substantial fluctuations, making it difficult to identify the checkpoint that truly represents the best-performing model. In this study, we empirically analyze the stability of downstream task performance in an LLM trained on diverse web-scale corpora. We find that task scores frequently fluctuate throughout training, both at the aggregate and example levels. To address this instability, we investigate two post-hoc checkpoint integration methods: checkpoint averaging and ensemble, motivated by the hypothesis that aggregating neighboring checkpoints can reduce performance volatility. We demonstrate both empirically and theoretically that these methods improve downstream performance stability without requiring any changes to the training procedure.</abstract>
<identifier type="citekey">nishida-etal-2025-instability</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.1246/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>22883</start>
<end>22895</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Instability in Downstream Task Performance During LLM Pretraining
%A Nishida, Yuto
%A Isonuma, Masaru
%A Oda, Yusuke
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F nishida-etal-2025-instability
%X When training large language models (LLMs), it is common practice to track downstream task performance throughout the training process and select the checkpoint with the highest validation score. However, downstream metrics often exhibit substantial fluctuations, making it difficult to identify the checkpoint that truly represents the best-performing model. In this study, we empirically analyze the stability of downstream task performance in an LLM trained on diverse web-scale corpora. We find that task scores frequently fluctuate throughout training, both at the aggregate and example levels. To address this instability, we investigate two post-hoc checkpoint integration methods: checkpoint averaging and ensemble, motivated by the hypothesis that aggregating neighboring checkpoints can reduce performance volatility. We demonstrate both empirically and theoretically that these methods improve downstream performance stability without requiring any changes to the training procedure.
%U https://aclanthology.org/2025.findings-emnlp.1246/
%P 22883-22895
Markdown (Informal)
[Instability in Downstream Task Performance During LLM Pretraining](https://aclanthology.org/2025.findings-emnlp.1246/) (Nishida et al., Findings 2025)
ACL
Yuto Nishida, Masaru Isonuma, and Yusuke Oda. 2025. Instability in Downstream Task Performance During LLM Pretraining. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 22883–22895, Suzhou, China. Association for Computational Linguistics.
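
The abstract mentions checkpoint averaging as one of the two post-hoc checkpoint integration methods studied. The snippet below is a minimal, generic sketch of weight-space averaging over neighboring pretraining checkpoints, not the authors' exact procedure; the checkpoint file names and the commented-out model call are hypothetical.

```python
# Illustrative sketch of checkpoint averaging (assumed PyTorch-style
# checkpoints containing plain state dicts), not the paper's exact method.
import torch

def average_checkpoints(paths):
    """Return a state dict whose tensors are the element-wise mean of the
    corresponding tensors from the given checkpoint files."""
    avg_state = None
    for path in paths:
        state = torch.load(path, map_location="cpu")
        if avg_state is None:
            # Clone so the first checkpoint on disk is not modified in place.
            avg_state = {k: v.detach().clone().float() for k, v in state.items()}
        else:
            for k, v in state.items():
                avg_state[k] += v.float()
    n = len(paths)
    return {k: v / n for k, v in avg_state.items()}

# Hypothetical usage: merge the three most recent pretraining checkpoints,
# then load the merged weights and rerun the downstream evaluation.
merged = average_checkpoints([
    "ckpt_step_98000.pt",
    "ckpt_step_99000.pt",
    "ckpt_step_100000.pt",
])
# model.load_state_dict(merged)
```

The ensemble variant discussed in the abstract would instead keep the neighboring checkpoints separate and combine their predictions at evaluation time rather than merging their weights.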