BibTeX
@inproceedings{romero-etal-2021-task,
    title = "A Task-Oriented Dialogue Architecture via Transformer Neural Language Models and Symbolic Injection",
    author = "Romero, Oscar J. and
      Wang, Antian and
      Zimmerman, John and
      Steinfeld, Aaron and
      Tomasic, Anthony",
    editor = "Li, Haizhou and
      Levow, Gina-Anne and
      Yu, Zhou and
      Gupta, Chitralekha and
      Sisman, Berrak and
      Cai, Siqi and
      Vandyke, David and
      Dethlefs, Nina and
      Wu, Yan and
      Li, Junyi Jessy",
    booktitle = "Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue",
    month = jul,
    year = "2021",
    address = "Singapore and Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.sigdial-1.46",
    doi = "10.18653/v1/2021.sigdial-1.46",
    pages = "438--444",
    abstract = "Recently, transformer language models have been applied to build both task- and non-task-oriented dialogue systems. Although transformers perform well on most of the NLP tasks, they perform poorly on context retrieval and symbolic reasoning. Our work aims to address this limitation by embedding the model in an operational loop that blends both natural language generation and symbolic injection. We evaluated our system on the multi-domain DSTC8 data set and reported joint goal accuracy of 75.8{\%} (ranked among the first half positions), intent accuracy of 97.4{\%} (which is higher than the reported literature), and a 15{\%} improvement for success rate compared to a baseline with no symbolic injection. These promising results suggest that transformer language models can not only generate proper system responses but also symbolic representations that can further be used to enhance the overall quality of the dialogue management as well as serving as scaffolding for complex conversational reasoning.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="romero-etal-2021-task">
    <titleInfo>
      <title>A Task-Oriented Dialogue Architecture via Transformer Neural Language Models and Symbolic Injection</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Oscar</namePart>
      <namePart type="given">J</namePart>
      <namePart type="family">Romero</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Antian</namePart>
      <namePart type="family">Wang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">John</namePart>
      <namePart type="family">Zimmerman</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Aaron</namePart>
      <namePart type="family">Steinfeld</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Anthony</namePart>
      <namePart type="family">Tomasic</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Haizhou</namePart>
        <namePart type="family">Li</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Gina-Anne</namePart>
        <namePart type="family">Levow</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Zhou</namePart>
        <namePart type="family">Yu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Chitralekha</namePart>
        <namePart type="family">Gupta</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Berrak</namePart>
        <namePart type="family">Sisman</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Siqi</namePart>
        <namePart type="family">Cai</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">David</namePart>
        <namePart type="family">Vandyke</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nina</namePart>
        <namePart type="family">Dethlefs</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yan</namePart>
        <namePart type="family">Wu</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Junyi</namePart>
        <namePart type="given">Jessy</namePart>
        <namePart type="family">Li</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Singapore and Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Recently, transformer language models have been applied to build both task- and non-task-oriented dialogue systems. Although transformers perform well on most of the NLP tasks, they perform poorly on context retrieval and symbolic reasoning. Our work aims to address this limitation by embedding the model in an operational loop that blends both natural language generation and symbolic injection. We evaluated our system on the multi-domain DSTC8 data set and reported joint goal accuracy of 75.8% (ranked among the first half positions), intent accuracy of 97.4% (which is higher than the reported literature), and a 15% improvement for success rate compared to a baseline with no symbolic injection. These promising results suggest that transformer language models can not only generate proper system responses but also symbolic representations that can further be used to enhance the overall quality of the dialogue management as well as serving as scaffolding for complex conversational reasoning.</abstract>
    <identifier type="citekey">romero-etal-2021-task</identifier>
    <identifier type="doi">10.18653/v1/2021.sigdial-1.46</identifier>
    <location>
      <url>https://aclanthology.org/2021.sigdial-1.46</url>
    </location>
    <part>
      <date>2021-07</date>
      <extent unit="page">
        <start>438</start>
        <end>444</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T A Task-Oriented Dialogue Architecture via Transformer Neural Language Models and Symbolic Injection
%A Romero, Oscar J.
%A Wang, Antian
%A Zimmerman, John
%A Steinfeld, Aaron
%A Tomasic, Anthony
%Y Li, Haizhou
%Y Levow, Gina-Anne
%Y Yu, Zhou
%Y Gupta, Chitralekha
%Y Sisman, Berrak
%Y Cai, Siqi
%Y Vandyke, David
%Y Dethlefs, Nina
%Y Wu, Yan
%Y Li, Junyi Jessy
%S Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue
%D 2021
%8 July
%I Association for Computational Linguistics
%C Singapore and Online
%F romero-etal-2021-task
%X Recently, transformer language models have been applied to build both task- and non-task-oriented dialogue systems. Although transformers perform well on most of the NLP tasks, they perform poorly on context retrieval and symbolic reasoning. Our work aims to address this limitation by embedding the model in an operational loop that blends both natural language generation and symbolic injection. We evaluated our system on the multi-domain DSTC8 data set and reported joint goal accuracy of 75.8% (ranked among the first half positions), intent accuracy of 97.4% (which is higher than the reported literature), and a 15% improvement for success rate compared to a baseline with no symbolic injection. These promising results suggest that transformer language models can not only generate proper system responses but also symbolic representations that can further be used to enhance the overall quality of the dialogue management as well as serving as scaffolding for complex conversational reasoning.
%R 10.18653/v1/2021.sigdial-1.46
%U https://aclanthology.org/2021.sigdial-1.46
%U https://doi.org/10.18653/v1/2021.sigdial-1.46
%P 438-444
Markdown (Informal)
[A Task-Oriented Dialogue Architecture via Transformer Neural Language Models and Symbolic Injection](https://aclanthology.org/2021.sigdial-1.46) (Romero et al., SIGDIAL 2021)
ACL
Oscar J. Romero, Antian Wang, John Zimmerman, Aaron Steinfeld, and Anthony Tomasic. 2021. A Task-Oriented Dialogue Architecture via Transformer Neural Language Models and Symbolic Injection. In Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 438–444, Singapore and Online. Association for Computational Linguistics.