BibTeX
@inproceedings{rajendran-etal-2018-learning,
title = "Learning End-to-End Goal-Oriented Dialog with Multiple Answers",
author = "Rajendran, Janarthanan and
Ganhotra, Jatin and
Singh, Satinder and
Polymenakos, Lazaros",
editor = "Riloff, Ellen and
Chiang, David and
Hockenmaier, Julia and
Tsujii, Jun{'}ichi",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D18-1418",
doi = "10.18653/v1/D18-1418",
pages = "3834--3843",
abstract = "In a dialog, there could be multiple valid next utterances at any point. The present end-to-end neural methods for dialog do not take this into account. They learn with the assumption that at any time there is only one correct next utterance. In this work, we focus on this problem in the goal-oriented dialog setting where there are different paths to reach a goal. We propose a new method, that uses a combination of supervised learning and reinforcement learning approaches to address this issue. We also propose a new and more effective testbed, permuted-bAbI dialog tasks, by introducing multiple valid next utterances to the original-bAbI dialog tasks, which allows evaluation of end-to-end goal-oriented dialog systems in a more realistic setting. We show that there is a significant drop in performance of existing end-to-end neural methods from 81.5{\%} per-dialog accuracy on original-bAbI dialog tasks to 30.3{\%} on permuted-bAbI dialog tasks. We also show that our proposed method improves the performance and achieves 47.3{\%} per-dialog accuracy on permuted-bAbI dialog tasks. We also release permuted-bAbI dialog tasks, our proposed testbed, to the community for evaluating dialog systems in a goal-oriented setting.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="rajendran-etal-2018-learning">
<titleInfo>
<title>Learning End-to-End Goal-Oriented Dialog with Multiple Answers</title>
</titleInfo>
<name type="personal">
<namePart type="given">Janarthanan</namePart>
<namePart type="family">Rajendran</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jatin</namePart>
<namePart type="family">Ganhotra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Satinder</namePart>
<namePart type="family">Singh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lazaros</namePart>
<namePart type="family">Polymenakos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-oct-nov</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Ellen</namePart>
<namePart type="family">Riloff</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Chiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julia</namePart>
<namePart type="family">Hockenmaier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jun’ichi</namePart>
<namePart type="family">Tsujii</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In a dialog, there could be multiple valid next utterances at any point. The present end-to-end neural methods for dialog do not take this into account. They learn with the assumption that at any time there is only one correct next utterance. In this work, we focus on this problem in the goal-oriented dialog setting where there are different paths to reach a goal. We propose a new method that uses a combination of supervised learning and reinforcement learning approaches to address this issue. We also propose a new and more effective testbed, permuted-bAbI dialog tasks, by introducing multiple valid next utterances to the original-bAbI dialog tasks, which allows evaluation of end-to-end goal-oriented dialog systems in a more realistic setting. We show that there is a significant drop in performance of existing end-to-end neural methods from 81.5% per-dialog accuracy on original-bAbI dialog tasks to 30.3% on permuted-bAbI dialog tasks. We also show that our proposed method improves the performance and achieves 47.3% per-dialog accuracy on permuted-bAbI dialog tasks. We also release permuted-bAbI dialog tasks, our proposed testbed, to the community for evaluating dialog systems in a goal-oriented setting.</abstract>
<identifier type="citekey">rajendran-etal-2018-learning</identifier>
<identifier type="doi">10.18653/v1/D18-1418</identifier>
<location>
<url>https://aclanthology.org/D18-1418</url>
</location>
<part>
<date>2018-oct-nov</date>
<extent unit="page">
<start>3834</start>
<end>3843</end>
</extent>
</part>
</mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Learning End-to-End Goal-Oriented Dialog with Multiple Answers
%A Rajendran, Janarthanan
%A Ganhotra, Jatin
%A Singh, Satinder
%A Polymenakos, Lazaros
%Y Riloff, Ellen
%Y Chiang, David
%Y Hockenmaier, Julia
%Y Tsujii, Jun’ichi
%S Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing
%D 2018
%8 oct-nov
%I Association for Computational Linguistics
%C Brussels, Belgium
%F rajendran-etal-2018-learning
%X In a dialog, there could be multiple valid next utterances at any point. The present end-to-end neural methods for dialog do not take this into account. They learn with the assumption that at any time there is only one correct next utterance. In this work, we focus on this problem in the goal-oriented dialog setting where there are different paths to reach a goal. We propose a new method that uses a combination of supervised learning and reinforcement learning approaches to address this issue. We also propose a new and more effective testbed, permuted-bAbI dialog tasks, by introducing multiple valid next utterances to the original-bAbI dialog tasks, which allows evaluation of end-to-end goal-oriented dialog systems in a more realistic setting. We show that there is a significant drop in performance of existing end-to-end neural methods from 81.5% per-dialog accuracy on original-bAbI dialog tasks to 30.3% on permuted-bAbI dialog tasks. We also show that our proposed method improves the performance and achieves 47.3% per-dialog accuracy on permuted-bAbI dialog tasks. We also release permuted-bAbI dialog tasks, our proposed testbed, to the community for evaluating dialog systems in a goal-oriented setting.
%R 10.18653/v1/D18-1418
%U https://aclanthology.org/D18-1418
%U https://doi.org/10.18653/v1/D18-1418
%P 3834-3843
Markdown (Informal)
[Learning End-to-End Goal-Oriented Dialog with Multiple Answers](https://aclanthology.org/D18-1418) (Rajendran et al., EMNLP 2018)
ACL
Janarthanan Rajendran, Jatin Ganhotra, Satinder Singh, and Lazaros Polymenakos. 2018. Learning End-to-End Goal-Oriented Dialog with Multiple Answers. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3834–3843, Brussels, Belgium. Association for Computational Linguistics.