@inproceedings{pala-etal-2025-error,
title = "Error Typing for Smarter Rewards: Improving Process Reward Models with Error-Aware Hierarchical Supervision",
author = "Pala, Tej Deep and
Sharma, Panshul and
Zadeh, Amir and
Li, Chuan and
Poria, Soujanya",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.807/",
pages = "14940--14954",
ISBN = "979-8-89176-335-7",
abstract = "Large Language Models (LLMs) are prone to hallucination, especially during multi{-}hop and reasoning-intensive tasks such as mathematical problem solving. While Outcome Reward Models verify only final answers, Process Reward Models (PRMs) score each intermediate step to steer generation toward coherent solutions. We introduce PathFinder{-}PRM, a novel hierarchical, error{-}aware discriminative PRM that first classifies math and consistency errors at each step, then combines these fine{-}grained signals to estimate step correctness. To train PathFinder{-}PRM, we construct a 400K{-}sample dataset by enriching the human{-}annotated PRM800K corpus and RLHFlow Mistral traces with three{-}dimensional step{-}level labels. On PRMBench, PathFinder{-}PRM achieves a new state{-}of{-}the{-}art PRMScore of 67.7, outperforming the prior best (65.5) while using 3{\texttimes} less data. When applied to reward guided greedy search, our model yields prm@8 48.3, a +1.5 point gain over the strongest baseline. These results demonstrate that decoupled error detection and reward estimation not only boost fine{-}grained error detection but also substantially improve end{-}to{-}end, reward{-}guided mathematical reasoning with greater data efficiency. Our code is available at https://github.com/declare-lab/PathFinder-PRM."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="pala-etal-2025-error">
<titleInfo>
<title>Error Typing for Smarter Rewards: Improving Process Reward Models with Error-Aware Hierarchical Supervision</title>
</titleInfo>
<name type="personal">
<namePart type="given">Tej</namePart>
<namePart type="given">Deep</namePart>
<namePart type="family">Pala</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Panshul</namePart>
<namePart type="family">Sharma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amir</namePart>
<namePart type="family">Zadeh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chuan</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Soujanya</namePart>
<namePart type="family">Poria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>Large Language Models (LLMs) are prone to hallucination, especially during multi-hop and reasoning-intensive tasks such as mathematical problem solving. While Outcome Reward Models verify only final answers, Process Reward Models (PRMs) score each intermediate step to steer generation toward coherent solutions. We introduce PathFinder-PRM, a novel hierarchical, error-aware discriminative PRM that first classifies math and consistency errors at each step, then combines these fine-grained signals to estimate step correctness. To train PathFinder-PRM, we construct a 400K-sample dataset by enriching the human-annotated PRM800K corpus and RLHFlow Mistral traces with three-dimensional step-level labels. On PRMBench, PathFinder-PRM achieves a new state-of-the-art PRMScore of 67.7, outperforming the prior best (65.5) while using 3× less data. When applied to reward guided greedy search, our model yields prm@8 48.3, a +1.5 point gain over the strongest baseline. These results demonstrate that decoupled error detection and reward estimation not only boost fine-grained error detection but also substantially improve end-to-end, reward-guided mathematical reasoning with greater data efficiency. Our code is available at https://github.com/declare-lab/PathFinder-PRM.</abstract>
<identifier type="citekey">pala-etal-2025-error</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.807/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>14940</start>
<end>14954</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Error Typing for Smarter Rewards: Improving Process Reward Models with Error-Aware Hierarchical Supervision
%A Pala, Tej Deep
%A Sharma, Panshul
%A Zadeh, Amir
%A Li, Chuan
%A Poria, Soujanya
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F pala-etal-2025-error
%X Large Language Models (LLMs) are prone to hallucination, especially during multi-hop and reasoning-intensive tasks such as mathematical problem solving. While Outcome Reward Models verify only final answers, Process Reward Models (PRMs) score each intermediate step to steer generation toward coherent solutions. We introduce PathFinder-PRM, a novel hierarchical, error-aware discriminative PRM that first classifies math and consistency errors at each step, then combines these fine-grained signals to estimate step correctness. To train PathFinder-PRM, we construct a 400K-sample dataset by enriching the human-annotated PRM800K corpus and RLHFlow Mistral traces with three-dimensional step-level labels. On PRMBench, PathFinder-PRM achieves a new state-of-the-art PRMScore of 67.7, outperforming the prior best (65.5) while using 3× less data. When applied to reward guided greedy search, our model yields prm@8 48.3, a +1.5 point gain over the strongest baseline. These results demonstrate that decoupled error detection and reward estimation not only boost fine-grained error detection but also substantially improve end-to-end, reward-guided mathematical reasoning with greater data efficiency. Our code is available at https://github.com/declare-lab/PathFinder-PRM.
%U https://aclanthology.org/2025.findings-emnlp.807/
%P 14940-14954
Markdown (Informal)
[Error Typing for Smarter Rewards: Improving Process Reward Models with Error-Aware Hierarchical Supervision](https://aclanthology.org/2025.findings-emnlp.807/) (Pala et al., Findings 2025)
ACL
Tej Deep Pala, Panshul Sharma, Amir Zadeh, Chuan Li, and Soujanya Poria. 2025. Error Typing for Smarter Rewards: Improving Process Reward Models with Error-Aware Hierarchical Supervision. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 14940–14954, Suzhou, China. Association for Computational Linguistics.