@inproceedings{hung-etal-2025-reward,
title = "Reward-Guided Tree Search for Inference Time Alignment of Large Language Models",
author = "Hung, Chia-Yu and
Majumder, Navonil and
Mehrish, Ambuj and
Poria, Soujanya",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.naacl-long.625/",
doi = "10.18653/v1/2025.naacl-long.625",
pages = "12575--12593",
ISBN = "979-8-89176-189-6",
abstract = "Inference-time computation methods enhance the performance of Large Language Models (LLMs) by leveraging additional computational resources to achieve superior results. Common techniques, such as Best-of-N sampling, Majority Voting, and variants of tree-search algorithm have proven to be effective in boosting the performance of LLMs. These approaches strategically trade increased computational resource for improved model responses. In this work, we proposed DARWIN, an inference-time alignment method that leverage the guidance of a reward model to achieve alignment through reward-guided tree search. Empirical evidences indicates that our method outperform other inference-time alignment methods such as Best-of-N and ARGS on two widely accepted alignment benchmarks AlpacaEval 2 and MT-Bench. Furthermore, we show that our inference-time approach achieves performance comparable to preference-tuned models on both benchmarks, highlighting the effectiveness of trading inference-time compute for enhanced performance during inference."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hung-etal-2025-reward">
<titleInfo>
<title>Reward-Guided Tree Search for Inference Time Alignment of Large Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chia-Yu</namePart>
<namePart type="family">Hung</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Navonil</namePart>
<namePart type="family">Majumder</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ambuj</namePart>
<namePart type="family">Mehrish</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Soujanya</namePart>
<namePart type="family">Poria</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-04</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Luis</namePart>
<namePart type="family">Chiruzzo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alan</namePart>
<namePart type="family">Ritter</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lu</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Albuquerque, New Mexico</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-189-6</identifier>
</relatedItem>
<abstract>Inference-time computation methods enhance the performance of Large Language Models (LLMs) by leveraging additional computational resources to achieve superior results. Common techniques, such as Best-of-N sampling, Majority Voting, and variants of tree-search algorithm have proven to be effective in boosting the performance of LLMs. These approaches strategically trade increased computational resource for improved model responses. In this work, we proposed DARWIN, an inference-time alignment method that leverage the guidance of a reward model to achieve alignment through reward-guided tree search. Empirical evidences indicates that our method outperform other inference-time alignment methods such as Best-of-N and ARGS on two widely accepted alignment benchmarks AlpacaEval 2 and MT-Bench. Furthermore, we show that our inference-time approach achieves performance comparable to preference-tuned models on both benchmarks, highlighting the effectiveness of trading inference-time compute for enhanced performance during inference.</abstract>
<identifier type="citekey">hung-etal-2025-reward</identifier>
<identifier type="doi">10.18653/v1/2025.naacl-long.625</identifier>
<location>
<url>https://aclanthology.org/2025.naacl-long.625/</url>
</location>
<part>
<date>2025-04</date>
<extent unit="page">
<start>12575</start>
<end>12593</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Reward-Guided Tree Search for Inference Time Alignment of Large Language Models
%A Hung, Chia-Yu
%A Majumder, Navonil
%A Mehrish, Ambuj
%A Poria, Soujanya
%Y Chiruzzo, Luis
%Y Ritter, Alan
%Y Wang, Lu
%S Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)
%D 2025
%8 April
%I Association for Computational Linguistics
%C Albuquerque, New Mexico
%@ 979-8-89176-189-6
%F hung-etal-2025-reward
%X Inference-time computation methods enhance the performance of Large Language Models (LLMs) by leveraging additional computational resources to achieve superior results. Common techniques, such as Best-of-N sampling, Majority Voting, and variants of tree-search algorithms, have proven effective in boosting the performance of LLMs. These approaches strategically trade increased computational resources for improved model responses. In this work, we propose DARWIN, an inference-time alignment method that leverages the guidance of a reward model to achieve alignment through reward-guided tree search. Empirical evidence indicates that our method outperforms other inference-time alignment methods such as Best-of-N and ARGS on two widely accepted alignment benchmarks, AlpacaEval 2 and MT-Bench. Furthermore, we show that our inference-time approach achieves performance comparable to preference-tuned models on both benchmarks, highlighting the effectiveness of trading inference-time compute for enhanced performance.
%R 10.18653/v1/2025.naacl-long.625
%U https://aclanthology.org/2025.naacl-long.625/
%U https://doi.org/10.18653/v1/2025.naacl-long.625
%P 12575-12593
Markdown (Informal)
[Reward-Guided Tree Search for Inference Time Alignment of Large Language Models](https://aclanthology.org/2025.naacl-long.625/) (Hung et al., NAACL 2025)
ACL
Chia-Yu Hung, Navonil Majumder, Ambuj Mehrish, and Soujanya Poria. 2025. Reward-Guided Tree Search for Inference Time Alignment of Large Language Models. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 12575–12593, Albuquerque, New Mexico. Association for Computational Linguistics.
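The abstract names two inference-time techniques: Best-of-N sampling and reward-guided tree search. Below is a minimal Python sketch of both patterns, assuming hypothetical `generate` and `reward` stand-ins for a language-model sampler and a trained reward model; it illustrates the generic idea only, not the paper's DARWIN procedure.

```python
# Minimal sketch of inference-time alignment with a reward model.
# `generate` and `reward` are hypothetical placeholders (random stubs here);
# this is a generic illustration, NOT the DARWIN algorithm from the paper.

import heapq
import random

def generate(prompt: str, prefix: str = "") -> str:
    """Hypothetical LM call: extends `prefix` with one sampled continuation."""
    return prefix + f" <sampled continuation {random.randint(0, 9)}>"

def reward(prompt: str, response: str) -> float:
    """Hypothetical reward model: scores how well `response` aligns."""
    return random.random()

def best_of_n(prompt: str, n: int = 8) -> str:
    """Best-of-N: sample N complete responses, keep the highest-reward one."""
    candidates = [generate(prompt) for _ in range(n)]
    return max(candidates, key=lambda r: reward(prompt, r))

def reward_guided_tree_search(prompt: str, width: int = 4, depth: int = 3) -> str:
    """Generic reward-guided search: repeatedly expand the best partial
    responses, scoring each candidate continuation with the reward model."""
    frontier = [("", 0.0)]  # (partial response, score)
    for _ in range(depth):
        expansions = []
        for prefix, _ in frontier:
            for _ in range(width):
                cand = generate(prompt, prefix)
                expansions.append((cand, reward(prompt, cand)))
        # keep only the `width` highest-scoring partial responses
        frontier = heapq.nlargest(width, expansions, key=lambda x: x[1])
    return max(frontier, key=lambda x: x[1])[0]

if __name__ == "__main__":
    print(best_of_n("Explain inference-time alignment."))
    print(reward_guided_tree_search("Explain inference-time alignment."))
```

The contrast the sketch makes visible: Best-of-N scores only complete responses, while tree search spends the same reward-model budget steering partial generations, which is the trade-off the paper evaluates on AlpacaEval 2 and MT-Bench.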