@inproceedings{guo-etal-2025-general,
title = "From General Reward to Targeted Reward: Improving Open-ended Long-context Generation Models",
author = "Guo, Zhihan and
Wu, Jiele and
Cui, Wenqian and
Zhang, Yifei and
Hu, Minda and
Wang, Yufei and
King, Irwin",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-main.260/",
doi = "10.18653/v1/2025.emnlp-main.260",
pages = "5151--5166",
ISBN = "979-8-89176-332-6",
abstract = "Current research on long-form context in Large Language Models (LLMs) primarily focuses on the understanding of long-contexts, the **Open-ended Long Text Generation** (Open-LTG) remains insufficiently explored. Training a long text generation model requires curation of gold-standard reference data, which is typically nonexistent for informative Open-LTG tasks. However, previous methods only utilize general assessments as reward signals, which limits accuracy. To bridge this gap, we introduce **ProxyReward**, an innovative reinforcement learning (RL) based framework, which includes a data synthesis method and a novel reward signal. Firstly, **ProxyReward Dataset** synthesis is accomplished through simple prompts that enables the model to create automatically, obviating extensive labeled data or significant manual effort. Secondly, **ProxyReward Signal** offers a targeted evaluation of information comprehensiveness and accuracy for specific questions. The experimental results indicate that our method ProxyReward **surpasses even GPT-4-Turbo**. It can significantly enhance performance by 20{\%} on the Open-LTG task when training widely used open-source models, while also surpassing the LLM-as-a-Judge approach. Our work presents effective methods to enhance the ability of LLMs to address complex open-ended questions posed by humans."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="guo-etal-2025-general">
  <titleInfo>
    <title>From General Reward to Targeted Reward: Improving Open-ended Long-context Generation Models</title>
  </titleInfo>
  <name type="personal">
    <namePart type="given">Zhihan</namePart>
    <namePart type="family">Guo</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Jiele</namePart>
    <namePart type="family">Wu</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Wenqian</namePart>
    <namePart type="family">Cui</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Yifei</namePart>
    <namePart type="family">Zhang</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Minda</namePart>
    <namePart type="family">Hu</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Yufei</namePart>
    <namePart type="family">Wang</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <name type="personal">
    <namePart type="given">Irwin</namePart>
    <namePart type="family">King</namePart>
    <role>
      <roleTerm authority="marcrelator" type="text">author</roleTerm>
    </role>
  </name>
  <originInfo>
    <dateIssued>2025-11</dateIssued>
  </originInfo>
  <typeOfResource>text</typeOfResource>
  <relatedItem type="host">
    <titleInfo>
      <title>Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Christos</namePart>
      <namePart type="family">Christodoulopoulos</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tanmoy</namePart>
      <namePart type="family">Chakraborty</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Carolyn</namePart>
      <namePart type="family">Rose</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Violet</namePart>
      <namePart type="family">Peng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">editor</roleTerm>
      </role>
    </name>
    <originInfo>
      <publisher>Association for Computational Linguistics</publisher>
      <place>
        <placeTerm type="text">Suzhou, China</placeTerm>
      </place>
    </originInfo>
    <genre authority="marcgt">conference publication</genre>
    <identifier type="isbn">979-8-89176-332-6</identifier>
  </relatedItem>
  <abstract>Current research on long contexts in Large Language Models (LLMs) primarily focuses on long-context understanding, while **Open-ended Long Text Generation** (Open-LTG) remains insufficiently explored. Training a long text generation model requires the curation of gold-standard reference data, which is typically nonexistent for informative Open-LTG tasks. As a result, previous methods rely only on general assessments as reward signals, which limits accuracy. To bridge this gap, we introduce **ProxyReward**, an innovative reinforcement learning (RL) based framework that includes a data synthesis method and a novel reward signal. First, the **ProxyReward Dataset** is synthesized through simple prompts that enable the model to generate data automatically, obviating the need for extensive labeled data or significant manual effort. Second, the **ProxyReward Signal** offers a targeted evaluation of information comprehensiveness and accuracy for specific questions. Experimental results indicate that our method ProxyReward **surpasses even GPT-4-Turbo**: it can significantly enhance performance by 20% on the Open-LTG task when training widely used open-source models, while also surpassing the LLM-as-a-Judge approach. Our work presents effective methods to enhance the ability of LLMs to address complex open-ended questions posed by humans.</abstract>
<identifier type="citekey">guo-etal-2025-general</identifier>
<identifier type="doi">10.18653/v1/2025.emnlp-main.260</identifier>
<location>
<url>https://aclanthology.org/2025.emnlp-main.260/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>5151</start>
<end>5166</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T From General Reward to Targeted Reward: Improving Open-ended Long-context Generation Models
%A Guo, Zhihan
%A Wu, Jiele
%A Cui, Wenqian
%A Zhang, Yifei
%A Hu, Minda
%A Wang, Yufei
%A King, Irwin
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-332-6
%F guo-etal-2025-general
%X Current research on long contexts in Large Language Models (LLMs) primarily focuses on long-context understanding, while **Open-ended Long Text Generation** (Open-LTG) remains insufficiently explored. Training a long text generation model requires the curation of gold-standard reference data, which is typically nonexistent for informative Open-LTG tasks. As a result, previous methods rely only on general assessments as reward signals, which limits accuracy. To bridge this gap, we introduce **ProxyReward**, an innovative reinforcement learning (RL) based framework that includes a data synthesis method and a novel reward signal. First, the **ProxyReward Dataset** is synthesized through simple prompts that enable the model to generate data automatically, obviating the need for extensive labeled data or significant manual effort. Second, the **ProxyReward Signal** offers a targeted evaluation of information comprehensiveness and accuracy for specific questions. Experimental results indicate that our method ProxyReward **surpasses even GPT-4-Turbo**: it can significantly enhance performance by 20% on the Open-LTG task when training widely used open-source models, while also surpassing the LLM-as-a-Judge approach. Our work presents effective methods to enhance the ability of LLMs to address complex open-ended questions posed by humans.
%R 10.18653/v1/2025.emnlp-main.260
%U https://aclanthology.org/2025.emnlp-main.260/
%U https://doi.org/10.18653/v1/2025.emnlp-main.260
%P 5151-5166
Markdown (Informal)
[From General Reward to Targeted Reward: Improving Open-ended Long-context Generation Models](https://aclanthology.org/2025.emnlp-main.260/) (Guo et al., EMNLP 2025)
ACL
Zhihan Guo, Jiele Wu, Wenqian Cui, Yifei Zhang, Minda Hu, Yufei Wang, and Irwin King. 2025. From General Reward to Targeted Reward: Improving Open-ended Long-context Generation Models. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing, pages 5151–5166, Suzhou, China. Association for Computational Linguistics.