@inproceedings{ghosh-etal-2025-onebench,
title = "{ONEB}ench to Test Them All: Sample-Level Benchmarking Over Open-Ended Capabilities",
author = "Ghosh, Adhiraj and
Dziadzio, Sebastian and
Prabhu, Ameya and
Udandarao, Vishaal and
Albanie, Samuel and
Bethge, Matthias",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.acl-long.1560/",
doi = "10.18653/v1/2025.acl-long.1560",
pages = "32445--32481",
ISBN = "979-8-89176-251-0",
abstract = "Traditional fixed test datasets fall short in evaluating the open-ended capabilities of foundation models. To address this, we propose ONEBench (OpeN-Ended Benchmarking), a new paradigm that consolidates individual evaluation datasets into a unified, ever-expanding sample pool. ONEBench enables custom benchmarks for specific capabilities while reusing and aggregating samples, mitigating overfitting and dataset bias for broader capability assessment. It reframes model evaluation as selecting and aggregating sample-level tests.Transitioning from task-specific benchmarks to ONEBench introduces two challenges: heterogeneity (aggregating diverse metrics) and incompleteness(comparing models tested on different data subsets). To address these, we propose an aggregation algorithm that ensures identifiability (asymptotically recovering ground-truth scores) and rapid convergence, enabling accurate model comparisons with relatively little data. On homogenous datasets, our algorithm produces rankings that highly correlate with average scores. Moreover, it remains robust to over 95{\%} missing measurements, reducing evaluation costs by up to 20x with minimal impact on rankings. We introduce ONEBench-LLM for language models and ONEBench-LMM for vision-language models, unifying evaluations across these domains, and enabling targeted model testing across diverse capabilities."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ghosh-etal-2025-onebench">
<titleInfo>
<title>ONEBench to Test Them All: Sample-Level Benchmarking Over Open-Ended Capabilities</title>
</titleInfo>
<name type="personal">
<namePart type="given">Adhiraj</namePart>
<namePart type="family">Ghosh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sebastian</namePart>
<namePart type="family">Dziadzio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ameya</namePart>
<namePart type="family">Prabhu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vishaal</namePart>
<namePart type="family">Udandarao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Samuel</namePart>
<namePart type="family">Albanie</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Matthias</namePart>
<namePart type="family">Bethge</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wanxiang</namePart>
<namePart type="family">Che</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Joyce</namePart>
<namePart type="family">Nabende</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ekaterina</namePart>
<namePart type="family">Shutova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Taher</namePart>
<namePart type="family">Pilehvar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vienna, Austria</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-251-0</identifier>
</relatedItem>
<abstract>Traditional fixed test datasets fall short in evaluating the open-ended capabilities of foundation models. To address this, we propose ONEBench (OpeN-Ended Benchmarking), a new paradigm that consolidates individual evaluation datasets into a unified, ever-expanding sample pool. ONEBench enables custom benchmarks for specific capabilities while reusing and aggregating samples, mitigating overfitting and dataset bias for broader capability assessment. It reframes model evaluation as selecting and aggregating sample-level tests.Transitioning from task-specific benchmarks to ONEBench introduces two challenges: heterogeneity (aggregating diverse metrics) and incompleteness(comparing models tested on different data subsets). To address these, we propose an aggregation algorithm that ensures identifiability (asymptotically recovering ground-truth scores) and rapid convergence, enabling accurate model comparisons with relatively little data. On homogenous datasets, our algorithm produces rankings that highly correlate with average scores. Moreover, it remains robust to over 95% missing measurements, reducing evaluation costs by up to 20x with minimal impact on rankings. We introduce ONEBench-LLM for language models and ONEBench-LMM for vision-language models, unifying evaluations across these domains, and enabling targeted model testing across diverse capabilities.</abstract>
<identifier type="citekey">ghosh-etal-2025-onebench</identifier>
<identifier type="doi">10.18653/v1/2025.acl-long.1560</identifier>
<location>
<url>https://aclanthology.org/2025.acl-long.1560/</url>
</location>
<part>
<date>2025-07</date>
<extent unit="page">
<start>32445</start>
<end>32481</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T ONEBench to Test Them All: Sample-Level Benchmarking Over Open-Ended Capabilities
%A Ghosh, Adhiraj
%A Dziadzio, Sebastian
%A Prabhu, Ameya
%A Udandarao, Vishaal
%A Albanie, Samuel
%A Bethge, Matthias
%Y Che, Wanxiang
%Y Nabende, Joyce
%Y Shutova, Ekaterina
%Y Pilehvar, Mohammad Taher
%S Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2025
%8 July
%I Association for Computational Linguistics
%C Vienna, Austria
%@ 979-8-89176-251-0
%F ghosh-etal-2025-onebench
%X Traditional fixed test datasets fall short in evaluating the open-ended capabilities of foundation models. To address this, we propose ONEBench (OpeN-Ended Benchmarking), a new paradigm that consolidates individual evaluation datasets into a unified, ever-expanding sample pool. ONEBench enables custom benchmarks for specific capabilities while reusing and aggregating samples, mitigating overfitting and dataset bias for broader capability assessment. It reframes model evaluation as selecting and aggregating sample-level tests. Transitioning from task-specific benchmarks to ONEBench introduces two challenges: heterogeneity (aggregating diverse metrics) and incompleteness (comparing models tested on different data subsets). To address these, we propose an aggregation algorithm that ensures identifiability (asymptotically recovering ground-truth scores) and rapid convergence, enabling accurate model comparisons with relatively little data. On homogeneous datasets, our algorithm produces rankings that highly correlate with average scores. Moreover, it remains robust to over 95% missing measurements, reducing evaluation costs by up to 20x with minimal impact on rankings. We introduce ONEBench-LLM for language models and ONEBench-LMM for vision-language models, unifying evaluations across these domains, and enabling targeted model testing across diverse capabilities.
%R 10.18653/v1/2025.acl-long.1560
%U https://aclanthology.org/2025.acl-long.1560/
%U https://doi.org/10.18653/v1/2025.acl-long.1560
%P 32445-32481
Markdown (Informal)
[ONEBench to Test Them All: Sample-Level Benchmarking Over Open-Ended Capabilities](https://aclanthology.org/2025.acl-long.1560/) (Ghosh et al., ACL 2025)
ACL
Adhiraj Ghosh, Sebastian Dziadzio, Ameya Prabhu, Vishaal Udandarao, Samuel Albanie, and Matthias Bethge. 2025. ONEBench to Test Them All: Sample-Level Benchmarking Over Open-Ended Capabilities. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 32445–32481, Vienna, Austria. Association for Computational Linguistics.