@inproceedings{alzahrani-etal-2024-benchmarks,
    title = "When Benchmarks are Targets: Revealing the Sensitivity of Large Language Model Leaderboards",
    author = "Alzahrani, Norah and
      Alyahya, Hisham and
      Alnumay, Yazeed and
      AlRashed, Sultan and
      Alsubaie, Shaykhah and
      Almushayqih, Yousef and
      Mirza, Faisal and
      Alotaibi, Nouf and
      Al-Twairesh, Nora and
      Alowisheq, Areeb and
      Bari, M Saiful and
      Khan, Haidar",
    editor = "Ku, Lun-Wei and
      Martins, Andre and
      Srikumar, Vivek",
    booktitle = "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2024",
    address = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.acl-long.744",
    doi = "10.18653/v1/2024.acl-long.744",
    pages = "13787--13805",
    abstract = "Large Language Model (LLM) leaderboards based on benchmark rankings are regularly used to guide practitioners in model selection. Often, the published leaderboard rankings are taken at face value {---} we show this is a (potentially costly) mistake. Under existing leaderboards, the relative performance of LLMs is highly sensitive to (often minute) details. We show that for popular multiple-choice question benchmarks (e.g., MMLU), minor perturbations to the benchmark, such as changing the order of choices or the method of answer selection, result in changes in rankings up to 8 positions. We explain this phenomenon by conducting systematic experiments over three broad categories of benchmark perturbations and identifying the sources of this behavior. Our analysis results in several best-practice recommendations, including the advantage of a *hybrid* scoring method for answer selection. Our study highlights the dangers of relying on simple benchmark evaluations and charts the path for more robust evaluation schemes on the existing benchmarks. The code for this paper is available at [https://github.com/National-Center-for-AI-Saudi-Arabia/lm-evaluation-harness](https://github.com/National-Center-for-AI-Saudi-Arabia/lm-evaluation-harness).",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="alzahrani-etal-2024-benchmarks">
    <titleInfo>
      <title>When Benchmarks are Targets: Revealing the Sensitivity of Large Language Model Leaderboards</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Norah</namePart>
      <namePart type="family">Alzahrani</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Hisham</namePart>
      <namePart type="family">Alyahya</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yazeed</namePart>
      <namePart type="family">Alnumay</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Sultan</namePart>
      <namePart type="family">AlRashed</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Shaykhah</namePart>
      <namePart type="family">Alsubaie</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yousef</namePart>
      <namePart type="family">Almushayqih</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Faisal</namePart>
      <namePart type="family">Mirza</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Nouf</namePart>
      <namePart type="family">Alotaibi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Nora</namePart>
      <namePart type="family">Al-Twairesh</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Areeb</namePart>
      <namePart type="family">Alowisheq</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">M</namePart>
      <namePart type="given">Saiful</namePart>
      <namePart type="family">Bari</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Haidar</namePart>
      <namePart type="family">Khan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-08</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Lun-Wei</namePart>
        <namePart type="family">Ku</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Andre</namePart>
        <namePart type="family">Martins</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Vivek</namePart>
        <namePart type="family">Srikumar</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Bangkok, Thailand</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Large Language Model (LLM) leaderboards based on benchmark rankings are regularly used to guide practitioners in model selection. Often, the published leaderboard rankings are taken at face value — we show this is a (potentially costly) mistake. Under existing leaderboards, the relative performance of LLMs is highly sensitive to (often minute) details. We show that for popular multiple-choice question benchmarks (e.g., MMLU), minor perturbations to the benchmark, such as changing the order of choices or the method of answer selection, result in changes in rankings up to 8 positions. We explain this phenomenon by conducting systematic experiments over three broad categories of benchmark perturbations and identifying the sources of this behavior. Our analysis results in several best-practice recommendations, including the advantage of a *hybrid* scoring method for answer selection. Our study highlights the dangers of relying on simple benchmark evaluations and charts the path for more robust evaluation schemes on the existing benchmarks. The code for this paper is available at [https://github.com/National-Center-for-AI-Saudi-Arabia/lm-evaluation-harness](https://github.com/National-Center-for-AI-Saudi-Arabia/lm-evaluation-harness).</abstract>
    <identifier type="citekey">alzahrani-etal-2024-benchmarks</identifier>
    <identifier type="doi">10.18653/v1/2024.acl-long.744</identifier>
    <location>
      <url>https://aclanthology.org/2024.acl-long.744</url>
    </location>
    <part>
      <date>2024-08</date>
      <extent unit="page">
        <start>13787</start>
        <end>13805</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T When Benchmarks are Targets: Revealing the Sensitivity of Large Language Model Leaderboards
%A Alzahrani, Norah
%A Alyahya, Hisham
%A Alnumay, Yazeed
%A AlRashed, Sultan
%A Alsubaie, Shaykhah
%A Almushayqih, Yousef
%A Mirza, Faisal
%A Alotaibi, Nouf
%A Al-Twairesh, Nora
%A Alowisheq, Areeb
%A Bari, M. Saiful
%A Khan, Haidar
%Y Ku, Lun-Wei
%Y Martins, Andre
%Y Srikumar, Vivek
%S Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F alzahrani-etal-2024-benchmarks
%X Large Language Model (LLM) leaderboards based on benchmark rankings are regularly used to guide practitioners in model selection. Often, the published leaderboard rankings are taken at face value — we show this is a (potentially costly) mistake. Under existing leaderboards, the relative performance of LLMs is highly sensitive to (often minute) details. We show that for popular multiple-choice question benchmarks (e.g., MMLU), minor perturbations to the benchmark, such as changing the order of choices or the method of answer selection, result in changes in rankings up to 8 positions. We explain this phenomenon by conducting systematic experiments over three broad categories of benchmark perturbations and identifying the sources of this behavior. Our analysis results in several best-practice recommendations, including the advantage of a *hybrid* scoring method for answer selection. Our study highlights the dangers of relying on simple benchmark evaluations and charts the path for more robust evaluation schemes on the existing benchmarks. The code for this paper is available at [https://github.com/National-Center-for-AI-Saudi-Arabia/lm-evaluation-harness](https://github.com/National-Center-for-AI-Saudi-Arabia/lm-evaluation-harness).
%R 10.18653/v1/2024.acl-long.744
%U https://aclanthology.org/2024.acl-long.744
%U https://doi.org/10.18653/v1/2024.acl-long.744
%P 13787-13805
Markdown (Informal)

[When Benchmarks are Targets: Revealing the Sensitivity of Large Language Model Leaderboards](https://aclanthology.org/2024.acl-long.744) (Alzahrani et al., ACL 2024)

ACL

Norah Alzahrani, Hisham Alyahya, Yazeed Alnumay, Sultan AlRashed, Shaykhah Alsubaie, Yousef Almushayqih, Faisal Mirza, Nouf Alotaibi, Nora Al-Twairesh, Areeb Alowisheq, M Saiful Bari, and Haidar Khan. 2024. When Benchmarks are Targets: Revealing the Sensitivity of Large Language Model Leaderboards. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 13787–13805, Bangkok, Thailand. Association for Computational Linguistics.