@inproceedings{zhou-etal-2025-graders,
title = "Graders Should Cheat: Privileged Information Enables Expert-Level Automated Evaluations",
author = "Zhou, Jin Peng and
Arnold, S{\'e}b and
Ding, Nan and
Weinberger, Kilian Q and
Hua, Nan and
Sha, Fei",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-main.838/",
pages = "16583--16601",
ISBN = "979-8-89176-332-6",
abstract = "Auto-evaluating language models (LMs), *i.e*., using a grader LM to evaluate the candidate LM, is an appealing way to accelerate the evaluation process and the cost associated with it. But this presents a paradox: how can we trust the grader LM, which is presumably weaker than the candidate LM, to assess problems that are beyond the frontier of the capabilities of either model or both? For instance, today{'}s LMs struggle on graduate-level physics and Olympiad-level math, making them unreliable graders in these domains. We show that providing *privileged information* {--} such as ground-truth solutions or problem-specific guidelines {--} improves automated evaluations on such frontier problems. This approach offers two key advantages. First, it expands the range of problems where LMs graders apply. Specifically, weaker models can now rate the predictions of stronger models. Second, privileged information can be used to devise easier variations of challenging problems which improves the separability of different LMs on tasks where their performance is generally low. With this approach, general-purpose LM graders match the state of the art performance on *RewardBench*, surpassing almost all the specially-tuned models. LM graders also outperform individual human raters on *Vibe-Eval*, and approach human expert graders on Olympiad-level math problems."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhou-etal-2025-graders">
<titleInfo>
<title>Graders Should Cheat: Privileged Information Enables Expert-Level Automated Evaluations</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jin</namePart>
<namePart type="given">Peng</namePart>
<namePart type="family">Zhou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Séb</namePart>
<namePart type="family">Arnold</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nan</namePart>
<namePart type="family">Ding</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kilian</namePart>
<namePart type="given">Q</namePart>
<namePart type="family">Weinberger</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nan</namePart>
<namePart type="family">Hua</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fei</namePart>
<namePart type="family">Sha</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-332-6</identifier>
</relatedItem>
<abstract>Auto-evaluating language models (LMs), *i.e.*, using a grader LM to evaluate the candidate LM, is an appealing way to accelerate the evaluation process and reduce the cost associated with it. But this presents a paradox: how can we trust the grader LM, which is presumably weaker than the candidate LM, to assess problems that are beyond the frontier of the capabilities of either model or both? For instance, today’s LMs struggle on graduate-level physics and Olympiad-level math, making them unreliable graders in these domains. We show that providing *privileged information* – such as ground-truth solutions or problem-specific guidelines – improves automated evaluations on such frontier problems. This approach offers two key advantages. First, it expands the range of problems where LM graders apply. Specifically, weaker models can now rate the predictions of stronger models. Second, privileged information can be used to devise easier variations of challenging problems, which improves the separability of different LMs on tasks where their performance is generally low. With this approach, general-purpose LM graders match state-of-the-art performance on *RewardBench*, surpassing almost all the specially-tuned models. LM graders also outperform individual human raters on *Vibe-Eval*, and approach human expert graders on Olympiad-level math problems.</abstract>
<identifier type="citekey">zhou-etal-2025-graders</identifier>
<location>
<url>https://aclanthology.org/2025.emnlp-main.838/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>16583</start>
<end>16601</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Graders Should Cheat: Privileged Information Enables Expert-Level Automated Evaluations
%A Zhou, Jin Peng
%A Arnold, Séb
%A Ding, Nan
%A Weinberger, Kilian Q.
%A Hua, Nan
%A Sha, Fei
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-332-6
%F zhou-etal-2025-graders
%X Auto-evaluating language models (LMs), *i.e.*, using a grader LM to evaluate the candidate LM, is an appealing way to accelerate the evaluation process and reduce the cost associated with it. But this presents a paradox: how can we trust the grader LM, which is presumably weaker than the candidate LM, to assess problems that are beyond the frontier of the capabilities of either model or both? For instance, today’s LMs struggle on graduate-level physics and Olympiad-level math, making them unreliable graders in these domains. We show that providing *privileged information* – such as ground-truth solutions or problem-specific guidelines – improves automated evaluations on such frontier problems. This approach offers two key advantages. First, it expands the range of problems where LM graders apply. Specifically, weaker models can now rate the predictions of stronger models. Second, privileged information can be used to devise easier variations of challenging problems, which improves the separability of different LMs on tasks where their performance is generally low. With this approach, general-purpose LM graders match state-of-the-art performance on *RewardBench*, surpassing almost all the specially-tuned models. LM graders also outperform individual human raters on *Vibe-Eval*, and approach human expert graders on Olympiad-level math problems.
%U https://aclanthology.org/2025.emnlp-main.838/
%P 16583-16601
Markdown (Informal)
[Graders Should Cheat: Privileged Information Enables Expert-Level Automated Evaluations](https://aclanthology.org/2025.emnlp-main.838/) (Zhou et al., EMNLP 2025)
ACL
Jin Peng Zhou, Séb Arnold, Nan Ding, Kilian Q Weinberger, Nan Hua, and Fei Sha. 2025. [Graders Should Cheat: Privileged Information Enables Expert-Level Automated Evaluations](https://aclanthology.org/2025.emnlp-main.838/). In *Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing*, pages 16583–16601, Suzhou, China. Association for Computational Linguistics.