@inproceedings{hoffmann-etal-2025-improving,
title = "Improving Neutral Point-of-View Generation with Data- and Parameter-Efficient {RL}",
author = "Hoffmann, Jessica and
Ahlheim, Christiane and
Yu, Zac and
Walfrand, Aria and
Jin, Jarvis and
Tano, Marie and
Beirami, Ahmad and
van Liemt, Erin MacMurray and
Thain, Nithum and
Sidahmed, Hakim and
Dixon, Lucas",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.emnlp-main.1395/",
pages = "27426--27455",
ISBN = "979-8-89176-332-6",
abstract = "The paper shows that parameter-efficient reinforcement learning (PE-RL) is a highly effective training regime to improve large language models' (LLMs) ability to answer queries on sensitive topics with a Neutral Point of View (NPOV), i.e. to provide significantly more informative, diverse and impartial answers. This is shown by evaluating PE-RL and multiple strong baselines{---}including LoRA finetuning (strongest baseline), SFT and RLHF. PE-RL not only improves on overall NPOV quality compared to the strongest baseline (97.06{\%} $\rightarrow$ 99.08{\%}), but also scores much higher on features linguists identify as key to separating good answers from the best answers (60.25{\%} $\rightarrow$ 85.21{\%} for presence of supportive details, 68.74{\%} $\rightarrow$ 91.43{\%} for absence of oversimplification). A qualitative analysis corroborates this. Finally, our evaluation finds no statistical differences between results on topics that appear in the training dataset and those on separated evaluation topics, which provides strong evidence that our approach to training PE-RL exhibits very effective out of topic generalization. To enable the study, and enable further future studies we also release the dataset, SHQ-NPOV, and provide a methodology to create such datasets through iterative rounds of human peer-critique and annotator training."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="hoffmann-etal-2025-improving">
<titleInfo>
<title>Improving Neutral Point-of-View Generation with Data- and Parameter-Efficient RL</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jessica</namePart>
<namePart type="family">Hoffmann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christiane</namePart>
<namePart type="family">Ahlheim</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zac</namePart>
<namePart type="family">Yu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aria</namePart>
<namePart type="family">Walfrand</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jarvis</namePart>
<namePart type="family">Jin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marie</namePart>
<namePart type="family">Tano</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ahmad</namePart>
<namePart type="family">Beirami</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Erin</namePart>
<namePart type="given">MacMurray</namePart>
<namePart type="family">van Liemt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nithum</namePart>
<namePart type="family">Thain</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hakim</namePart>
<namePart type="family">Sidahmed</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucas</namePart>
<namePart type="family">Dixon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-332-6</identifier>
</relatedItem>
    <abstract>The paper shows that parameter-efficient reinforcement learning (PE-RL) is a highly effective training regime to improve large language models’ (LLMs) ability to answer queries on sensitive topics with a Neutral Point of View (NPOV), i.e., to provide significantly more informative, diverse and impartial answers. This is shown by evaluating PE-RL and multiple strong baselines—including LoRA finetuning (strongest baseline), SFT and RLHF. PE-RL not only improves on overall NPOV quality compared to the strongest baseline (97.06% → 99.08%), but also scores much higher on features linguists identify as key to separating good answers from the best answers (60.25% → 85.21% for presence of supportive details, 68.74% → 91.43% for absence of oversimplification). A qualitative analysis corroborates this. Finally, our evaluation finds no statistical differences between results on topics that appear in the training dataset and those on separate evaluation topics, which provides strong evidence that our approach to training PE-RL exhibits very effective out-of-topic generalization. To enable this study and future ones, we also release the dataset, SHQ-NPOV, and provide a methodology to create such datasets through iterative rounds of human peer-critique and annotator training.</abstract>
<identifier type="citekey">hoffmann-etal-2025-improving</identifier>
<location>
<url>https://aclanthology.org/2025.emnlp-main.1395/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>27426</start>
<end>27455</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Improving Neutral Point-of-View Generation with Data- and Parameter-Efficient RL
%A Hoffmann, Jessica
%A Ahlheim, Christiane
%A Yu, Zac
%A Walfrand, Aria
%A Jin, Jarvis
%A Tano, Marie
%A Beirami, Ahmad
%A van Liemt, Erin MacMurray
%A Thain, Nithum
%A Sidahmed, Hakim
%A Dixon, Lucas
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-332-6
%F hoffmann-etal-2025-improving
%X The paper shows that parameter-efficient reinforcement learning (PE-RL) is a highly effective training regime to improve large language models’ (LLMs) ability to answer queries on sensitive topics with a Neutral Point of View (NPOV), i.e., to provide significantly more informative, diverse and impartial answers. This is shown by evaluating PE-RL and multiple strong baselines—including LoRA finetuning (strongest baseline), SFT and RLHF. PE-RL not only improves on overall NPOV quality compared to the strongest baseline (97.06% → 99.08%), but also scores much higher on features linguists identify as key to separating good answers from the best answers (60.25% → 85.21% for presence of supportive details, 68.74% → 91.43% for absence of oversimplification). A qualitative analysis corroborates this. Finally, our evaluation finds no statistical differences between results on topics that appear in the training dataset and those on separate evaluation topics, which provides strong evidence that our approach to training PE-RL exhibits very effective out-of-topic generalization. To enable this study and future ones, we also release the dataset, SHQ-NPOV, and provide a methodology to create such datasets through iterative rounds of human peer-critique and annotator training.
%U https://aclanthology.org/2025.emnlp-main.1395/
%P 27426-27455
Markdown (Informal)
[Improving Neutral Point-of-View Generation with Data- and Parameter-Efficient RL](https://aclanthology.org/2025.emnlp-main.1395/) (Hoffmann et al., EMNLP 2025)
ACL
- Jessica Hoffmann, Christiane Ahlheim, Zac Yu, Aria Walfrand, Jarvis Jin, Marie Tano, Ahmad Beirami, Erin MacMurray van Liemt, Nithum Thain, Hakim Sidahmed, and Lucas Dixon. 2025. Improving Neutral Point-of-View Generation with Data- and Parameter-Efficient RL. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing, pages 27426–27455, Suzhou, China. Association for Computational Linguistics.