@inproceedings{zhao-etal-2025-dream,
title = "Dream to Chat: Model-based Reinforcement Learning on Dialogues with User Belief Modeling",
author = "Zhao, Yue and
Wang, Xiaoyu and
Wang, Dan and
Jiang, Zhonglin and
Gu, Qingqing and
Chen, Teng and
Xi, Ningyuan and
Qu, Jinxian and
Chen, Yong and
Ji, Luo",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.findings-emnlp.256/",
doi = "10.18653/v1/2025.findings-emnlp.256",
pages = "4764--4781",
ISBN = "979-8-89176-335-7",
abstract = "World models have been widely utilized in robotics, gaming, and autonomous driving. However, their applications to natural language tasks are relatively limited. In this paper, we construct the dialogue world model, which could predict future utterances and user beliefs, including emotion, sentiment, and intention. In this paper, we propose a framework called DreamCUB, which shows that this user belief modeling and the entire dialogue world model can be established by LLM post-training. By defining a POMDP, we apply model-based reinforcement learning to the dialogue system and solve it by maximizing the information bottleneck. Experiments show that the pretrained dialogue world model can achieve state-of-the-art performances on emotion classification and sentiment identification, while dialogue quality is also enhanced by joint training of policy, critic and dialogue world model. Further analysis reveals that DreamCUB holds a reasonable exploration-exploitation balance and also transfers well to out-of-domain scenarios such as empathetic dialogues."
}<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhao-etal-2025-dream">
<titleInfo>
<title>Dream to Chat: Model-based Reinforcement Learning on Dialogues with User Belief Modeling</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yue</namePart>
<namePart type="family">Zhao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xiaoyu</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dan</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhonglin</namePart>
<namePart type="family">Jiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Qingqing</namePart>
<namePart type="family">Gu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Teng</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ningyuan</namePart>
<namePart type="family">Xi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jinxian</namePart>
<namePart type="family">Qu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yong</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Luo</namePart>
<namePart type="family">Ji</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2025-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2025</title>
</titleInfo>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tanmoy</namePart>
<namePart type="family">Chakraborty</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carolyn</namePart>
<namePart type="family">Rose</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Violet</namePart>
<namePart type="family">Peng</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Suzhou, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
<identifier type="isbn">979-8-89176-335-7</identifier>
</relatedItem>
<abstract>World models have been widely utilized in robotics, gaming, and autonomous driving. However, their applications to natural language tasks are relatively limited. In this paper, we construct the dialogue world model, which could predict future utterances and user beliefs, including emotion, sentiment, and intention. In this paper, we propose a framework called DreamCUB, which shows that this user belief modeling and the entire dialogue world model can be established by LLM post-training. By defining a POMDP, we apply model-based reinforcement learning to the dialogue system and solve it by maximizing the information bottleneck. Experiments show that the pretrained dialogue world model can achieve state-of-the-art performances on emotion classification and sentiment identification, while dialogue quality is also enhanced by joint training of policy, critic and dialogue world model. Further analysis reveals that DreamCUB holds a reasonable exploration-exploitation balance and also transfers well to out-of-domain scenarios such as empathetic dialogues.</abstract>
<identifier type="citekey">zhao-etal-2025-dream</identifier>
<identifier type="doi">10.18653/v1/2025.findings-emnlp.256</identifier>
<location>
<url>https://aclanthology.org/2025.findings-emnlp.256/</url>
</location>
<part>
<date>2025-11</date>
<extent unit="page">
<start>4764</start>
<end>4781</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Dream to Chat: Model-based Reinforcement Learning on Dialogues with User Belief Modeling
%A Zhao, Yue
%A Wang, Xiaoyu
%A Wang, Dan
%A Jiang, Zhonglin
%A Gu, Qingqing
%A Chen, Teng
%A Xi, Ningyuan
%A Qu, Jinxian
%A Chen, Yong
%A Ji, Luo
%Y Christodoulopoulos, Christos
%Y Chakraborty, Tanmoy
%Y Rose, Carolyn
%Y Peng, Violet
%S Findings of the Association for Computational Linguistics: EMNLP 2025
%D 2025
%8 November
%I Association for Computational Linguistics
%C Suzhou, China
%@ 979-8-89176-335-7
%F zhao-etal-2025-dream
%X World models have been widely utilized in robotics, gaming, and autonomous driving. However, their applications to natural language tasks are relatively limited. In this paper, we construct a dialogue world model, which can predict future utterances and user beliefs, including emotion, sentiment, and intention. We further propose a framework called DreamCUB, which shows that both this user belief modeling and the entire dialogue world model can be established by LLM post-training. By defining a POMDP, we apply model-based reinforcement learning to the dialogue system and solve it by maximizing the information bottleneck. Experiments show that the pretrained dialogue world model achieves state-of-the-art performance on emotion classification and sentiment identification, while dialogue quality is also enhanced by joint training of the policy, critic, and dialogue world model. Further analysis reveals that DreamCUB maintains a reasonable exploration-exploitation balance and also transfers well to out-of-domain scenarios such as empathetic dialogues.
%R 10.18653/v1/2025.findings-emnlp.256
%U https://aclanthology.org/2025.findings-emnlp.256/
%U https://doi.org/10.18653/v1/2025.findings-emnlp.256
%P 4764-4781
Markdown (Informal)
[Dream to Chat: Model-based Reinforcement Learning on Dialogues with User Belief Modeling](https://aclanthology.org/2025.findings-emnlp.256/) (Zhao et al., Findings 2025)
ACL
Yue Zhao, Xiaoyu Wang, Dan Wang, Zhonglin Jiang, Qingqing Gu, Teng Chen, Ningyuan Xi, Jinxian Qu, Yong Chen, and Luo Ji. 2025. Dream to Chat: Model-based Reinforcement Learning on Dialogues with User Belief Modeling. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 4764–4781, Suzhou, China. Association for Computational Linguistics.