@inproceedings{ohashi-higashinaka-2022-adaptive,
title = "Adaptive Natural Language Generation for Task-oriented Dialogue via Reinforcement Learning",
author = "Ohashi, Atsumoto and
Higashinaka, Ryuichiro",
booktitle = "Proceedings of the 29th International Conference on Computational Linguistics",
month = oct,
year = "2022",
address = "Gyeongju, Republic of Korea",
publisher = "International Committee on Computational Linguistics",
url = "https://aclanthology.org/2022.coling-1.19",
pages = "242--252",
abstract = "When a natural language generation (NLG) component is implemented in a real-world task-oriented dialogue system, it is necessary to generate not only natural utterances as learned on training data but also utterances adapted to the dialogue environment (e.g., noise from environmental sounds) and the user (e.g., users with low levels of understanding ability). Inspired by recent advances in reinforcement learning (RL) for language generation tasks, we propose ANTOR, a method for \textbf{A}daptive \textbf{N}atural language generation for \textbf{T}ask-\textbf{O}riented dialogue via \textbf{R}einforcement learning. In ANTOR, a natural language understanding (NLU) module, which corresponds to the user{'}s understanding of system utterances, is incorporated into the objective function of RL. If the NLG{'}s intentions are correctly conveyed to the NLU, which understands a system{'}s utterances, the NLG is given a positive reward. We conducted experiments on the MultiWOZ dataset, and we confirmed that ANTOR could generate adaptive utterances against speech recognition errors and the different vocabulary levels of users.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="ohashi-higashinaka-2022-adaptive">
    <titleInfo>
      <title>Adaptive Natural Language Generation for Task-oriented Dialogue via Reinforcement Learning</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Atsumoto</namePart>
      <namePart type="family">Ohashi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ryuichiro</namePart>
      <namePart type="family">Higashinaka</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-10</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 29th International Conference on Computational Linguistics</title>
      </titleInfo>
      <originInfo>
        <publisher>International Committee on Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Gyeongju, Republic of Korea</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>When a natural language generation (NLG) component is implemented in a real-world task-oriented dialogue system, it is necessary to generate not only natural utterances as learned on training data but also utterances adapted to the dialogue environment (e.g., noise from environmental sounds) and the user (e.g., users with low levels of understanding ability). Inspired by recent advances in reinforcement learning (RL) for language generation tasks, we propose ANTOR, a method for Adaptive Natural language generation for Task-Oriented dialogue via Reinforcement learning. In ANTOR, a natural language understanding (NLU) module, which corresponds to the user’s understanding of system utterances, is incorporated into the objective function of RL. If the NLG’s intentions are correctly conveyed to the NLU, which understands a system’s utterances, the NLG is given a positive reward. We conducted experiments on the MultiWOZ dataset, and we confirmed that ANTOR could generate adaptive utterances against speech recognition errors and the different vocabulary levels of users.</abstract>
    <identifier type="citekey">ohashi-higashinaka-2022-adaptive</identifier>
    <location>
      <url>https://aclanthology.org/2022.coling-1.19</url>
    </location>
    <part>
      <date>2022-10</date>
      <extent unit="page">
        <start>242</start>
        <end>252</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Adaptive Natural Language Generation for Task-oriented Dialogue via Reinforcement Learning
%A Ohashi, Atsumoto
%A Higashinaka, Ryuichiro
%S Proceedings of the 29th International Conference on Computational Linguistics
%D 2022
%8 October
%I International Committee on Computational Linguistics
%C Gyeongju, Republic of Korea
%F ohashi-higashinaka-2022-adaptive
%X When a natural language generation (NLG) component is implemented in a real-world task-oriented dialogue system, it is necessary to generate not only natural utterances as learned on training data but also utterances adapted to the dialogue environment (e.g., noise from environmental sounds) and the user (e.g., users with low levels of understanding ability). Inspired by recent advances in reinforcement learning (RL) for language generation tasks, we propose ANTOR, a method for Adaptive Natural language generation for Task-Oriented dialogue via Reinforcement learning. In ANTOR, a natural language understanding (NLU) module, which corresponds to the user’s understanding of system utterances, is incorporated into the objective function of RL. If the NLG’s intentions are correctly conveyed to the NLU, which understands a system’s utterances, the NLG is given a positive reward. We conducted experiments on the MultiWOZ dataset, and we confirmed that ANTOR could generate adaptive utterances against speech recognition errors and the different vocabulary levels of users.
%U https://aclanthology.org/2022.coling-1.19
%P 242-252
Markdown (Informal)
[Adaptive Natural Language Generation for Task-oriented Dialogue via Reinforcement Learning](https://aclanthology.org/2022.coling-1.19) (Ohashi & Higashinaka, COLING 2022)
ACL
Atsumoto Ohashi and Ryuichiro Higashinaka. 2022. [Adaptive Natural Language Generation for Task-oriented Dialogue via Reinforcement Learning](https://aclanthology.org/2022.coling-1.19). In *Proceedings of the 29th International Conference on Computational Linguistics*, pages 242–252, Gyeongju, Republic of Korea. International Committee on Computational Linguistics.
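
The abstract's central mechanism — an NLU module scores whether the NLG's intended dialogue acts survive in the generated utterance, and that score serves as the RL reward — can be illustrated with a minimal sketch. The Python below is not the authors' implementation; the function names, the toy keyword-based NLU, and the act labels are hypothetical stand-ins (a real system would use a trained NLU model and the MultiWOZ act schema).

```python
# Illustrative sketch of an NLU-based reward for RL fine-tuning of an NLG,
# following the idea described in the ANTOR abstract. Everything here is a
# hypothetical stand-in, not the paper's code.

from typing import Set


def parse_dialogue_acts(utterance: str) -> Set[str]:
    """Hypothetical NLU: map an utterance to the dialogue acts it expresses.

    A real system would use a trained NLU model; this toy keyword lookup
    exists only so the sketch runs end to end.
    """
    lexicon = {
        "inform-price": ["cheap", "expensive", "price"],
        "inform-area": ["north", "south", "centre", "area"],
        "request-phone": ["phone", "number"],
    }
    return {act for act, cues in lexicon.items()
            if any(cue in utterance.lower() for cue in cues)}


def nlu_reward(target_acts: Set[str], utterance: str) -> float:
    """F1 between the acts the NLG was asked to convey and the acts the NLU
    recovers from the generated utterance; higher means the intention was
    conveyed, so this value can be used as the RL reward signal."""
    predicted = parse_dialogue_acts(utterance)
    if not predicted or not target_acts:
        return 0.0
    tp = len(predicted & target_acts)
    if tp == 0:
        return 0.0
    precision = tp / len(predicted)
    recall = tp / len(target_acts)
    return 2 * precision * recall / (precision + recall)


if __name__ == "__main__":
    acts = {"inform-price", "inform-area"}
    # Both acts recovered by the NLU -> full reward.
    print(nlu_reward(acts, "It is a cheap place in the centre of town."))  # 1.0
    # Neither act recovered -> zero reward.
    print(nlu_reward(acts, "Sure, I can help with that."))                 # 0.0
```

In the paper's setting, a reward of this shape would be computed for each sampled utterance during RL fine-tuning, so the NLG is pushed toward phrasings the (possibly noise- or vocabulary-constrained) NLU can still parse correctly.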