@inproceedings{ultes-etal-2017-reward,
    title     = {Reward-Balancing for Statistical Spoken Dialogue Systems using Multi-objective Reinforcement Learning},
    author    = {Ultes, Stefan and
                 Budzianowski, Pawe{\l} and
                 Casanueva, I{\~n}igo and
                 Mrk{\v{s}}i{\'c}, Nikola and
                 Rojas-Barahona, Lina M. and
                 Su, Pei-Hao and
                 Wen, Tsung-Hsien and
                 Ga{\v{s}}i{\'c}, Milica and
                 Young, Steve},
    editor    = {Jokinen, Kristiina and
                 Stede, Manfred and
                 DeVault, David and
                 Louis, Annie},
    booktitle = {Proceedings of the 18th Annual {SIGdial} Meeting on Discourse and Dialogue},
    month     = aug,
    year      = {2017},
    address   = {Saarbr{\"u}cken, Germany},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/W17-5509},
    doi       = {10.18653/v1/W17-5509},
    pages     = {65--70},
    abstract  = {Reinforcement learning is widely used for dialogue policy optimization where the reward function often consists of more than one component, e.g., the dialogue success and the dialogue length. In this work, we propose a structured method for finding a good balance between these components by searching for the optimal reward component weighting. To render this search feasible, we use multi-objective reinforcement learning to significantly reduce the number of training dialogues required. We apply our proposed method to find optimized component weights for six domains and compare them to a default baseline.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ultes-etal-2017-reward">
<titleInfo>
<title>Reward-Balancing for Statistical Spoken Dialogue Systems using Multi-objective Reinforcement Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Stefan</namePart>
<namePart type="family">Ultes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paweł</namePart>
<namePart type="family">Budzianowski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Iñigo</namePart>
<namePart type="family">Casanueva</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nikola</namePart>
<namePart type="family">Mrkšić</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lina</namePart>
<namePart type="given">M.</namePart>
<namePart type="family">Rojas-Barahona</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pei-Hao</namePart>
<namePart type="family">Su</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tsung-Hsien</namePart>
<namePart type="family">Wen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Milica</namePart>
<namePart type="family">Gašić</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steve</namePart>
<namePart type="family">Young</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kristiina</namePart>
<namePart type="family">Jokinen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Manfred</namePart>
<namePart type="family">Stede</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">DeVault</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Annie</namePart>
<namePart type="family">Louis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Saarbrücken, Germany</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Reinforcement learning is widely used for dialogue policy optimization where the reward function often consists of more than one component, e.g., the dialogue success and the dialogue length. In this work, we propose a structured method for finding a good balance between these components by searching for the optimal reward component weighting. To render this search feasible, we use multi-objective reinforcement learning to significantly reduce the number of training dialogues required. We apply our proposed method to find optimized component weights for six domains and compare them to a default baseline.</abstract>
<identifier type="citekey">ultes-etal-2017-reward</identifier>
<identifier type="doi">10.18653/v1/W17-5509</identifier>
<location>
<url>https://aclanthology.org/W17-5509</url>
</location>
<part>
<date>2017-08</date>
<extent unit="page">
<start>65</start>
<end>70</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Reward-Balancing for Statistical Spoken Dialogue Systems using Multi-objective Reinforcement Learning
%A Ultes, Stefan
%A Budzianowski, Paweł
%A Casanueva, Iñigo
%A Mrkšić, Nikola
%A Rojas-Barahona, Lina M.
%A Su, Pei-Hao
%A Wen, Tsung-Hsien
%A Gašić, Milica
%A Young, Steve
%Y Jokinen, Kristiina
%Y Stede, Manfred
%Y DeVault, David
%Y Louis, Annie
%S Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue
%D 2017
%8 August
%I Association for Computational Linguistics
%C Saarbrücken, Germany
%F ultes-etal-2017-reward
%X Reinforcement learning is widely used for dialogue policy optimization where the reward function often consists of more than one component, e.g., the dialogue success and the dialogue length. In this work, we propose a structured method for finding a good balance between these components by searching for the optimal reward component weighting. To render this search feasible, we use multi-objective reinforcement learning to significantly reduce the number of training dialogues required. We apply our proposed method to find optimized component weights for six domains and compare them to a default baseline.
%R 10.18653/v1/W17-5509
%U https://aclanthology.org/W17-5509
%U https://doi.org/10.18653/v1/W17-5509
%P 65-70
Markdown (Informal)
[Reward-Balancing for Statistical Spoken Dialogue Systems using Multi-objective Reinforcement Learning](https://aclanthology.org/W17-5509) (Ultes et al., SIGDIAL 2017)
ACL
- Stefan Ultes, Paweł Budzianowski, Iñigo Casanueva, Nikola Mrkšić, Lina M. Rojas-Barahona, Pei-Hao Su, Tsung-Hsien Wen, Milica Gašić, and Steve Young. 2017. Reward-Balancing for Statistical Spoken Dialogue Systems using Multi-objective Reinforcement Learning. In Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, pages 65–70, Saarbrücken, Germany. Association for Computational Linguistics.