@inproceedings{beygi-etal-2022-logical,
    title = "Logical Reasoning for Task Oriented Dialogue Systems",
    author = "Beygi, Sajjad and
      Fazel-Zarandi, Maryam and
      Cervone, Alessandra and
      Krishnan, Prakash and
      Jonnalagadda, Siddhartha",
    editor = "Malmasi, Shervin and
      Rokhlenko, Oleg and
      Ueffing, Nicola and
      Guy, Ido and
      Agichtein, Eugene and
      Kallumadi, Surya",
    booktitle = "Proceedings of the Fifth Workshop on e-Commerce and NLP (ECNLP 5)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.ecnlp-1.10",
    doi = "10.18653/v1/2022.ecnlp-1.10",
    pages = "68--79",
    abstract = "In recent years, large pretrained models have been used in dialogue systems to improve rates of successful task completion. However, the lack of reasoning capabilities in dialogue platforms makes it difficult to provide relevant and fluent responses unless the designers of a conversational experience spend considerable time implementing these capabilities in external rule-based modules. In this work, we propose a novel method to fine-tune pretrained transformer models, such as RoBERTa and T5, to reason over a set of facts in a given dialogue context. Our method includes a synthetic data generation mechanism that helps the model learn logical relations, such as comparisons between lists of numerical values, inverse relations (and negation), inclusion and exclusion for categorical attributes, application of combinations of attributes over both numerical and categorical values, and spoken forms of numerical values, without the need for additional training data. We show that the transformer-based model can perform logical reasoning to answer questions when the dialogue context contains all the required information; when only partial information is available, it can extract appropriate constraints to pass to downstream components (e.g., a knowledge base). We observe that transformer-based models such as UnifiedQA-T5 can be fine-tuned to perform logical reasoning (such as comparisons of numerical and categorical attributes) over attributes seen at training time (e.g., accuracy of 90{\%}+ for comparisons over fewer than kmax=5 values on a held-out test dataset).",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="beygi-etal-2022-logical">
    <titleInfo>
      <title>Logical Reasoning for Task Oriented Dialogue Systems</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Sajjad</namePart>
      <namePart type="family">Beygi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Maryam</namePart>
      <namePart type="family">Fazel-Zarandi</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Alessandra</namePart>
      <namePart type="family">Cervone</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Prakash</namePart>
      <namePart type="family">Krishnan</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Siddhartha</namePart>
      <namePart type="family">Jonnalagadda</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Fifth Workshop on e-Commerce and NLP (ECNLP 5)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Shervin</namePart>
        <namePart type="family">Malmasi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Oleg</namePart>
        <namePart type="family">Rokhlenko</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nicola</namePart>
        <namePart type="family">Ueffing</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Ido</namePart>
        <namePart type="family">Guy</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Eugene</namePart>
        <namePart type="family">Agichtein</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Surya</namePart>
        <namePart type="family">Kallumadi</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Dublin, Ireland</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>In recent years, large pretrained models have been used in dialogue systems to improve rates of successful task completion. However, the lack of reasoning capabilities in dialogue platforms makes it difficult to provide relevant and fluent responses unless the designers of a conversational experience spend considerable time implementing these capabilities in external rule-based modules. In this work, we propose a novel method to fine-tune pretrained transformer models, such as RoBERTa and T5, to reason over a set of facts in a given dialogue context. Our method includes a synthetic data generation mechanism that helps the model learn logical relations, such as comparisons between lists of numerical values, inverse relations (and negation), inclusion and exclusion for categorical attributes, application of combinations of attributes over both numerical and categorical values, and spoken forms of numerical values, without the need for additional training data. We show that the transformer-based model can perform logical reasoning to answer questions when the dialogue context contains all the required information; when only partial information is available, it can extract appropriate constraints to pass to downstream components (e.g., a knowledge base). We observe that transformer-based models such as UnifiedQA-T5 can be fine-tuned to perform logical reasoning (such as comparisons of numerical and categorical attributes) over attributes seen at training time (e.g., accuracy of 90%+ for comparisons over fewer than kmax=5 values on a held-out test dataset).</abstract>
    <identifier type="citekey">beygi-etal-2022-logical</identifier>
    <identifier type="doi">10.18653/v1/2022.ecnlp-1.10</identifier>
    <location>
      <url>https://aclanthology.org/2022.ecnlp-1.10</url>
    </location>
    <part>
      <date>2022-05</date>
      <extent unit="page">
        <start>68</start>
        <end>79</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Logical Reasoning for Task Oriented Dialogue Systems
%A Beygi, Sajjad
%A Fazel-Zarandi, Maryam
%A Cervone, Alessandra
%A Krishnan, Prakash
%A Jonnalagadda, Siddhartha
%Y Malmasi, Shervin
%Y Rokhlenko, Oleg
%Y Ueffing, Nicola
%Y Guy, Ido
%Y Agichtein, Eugene
%Y Kallumadi, Surya
%S Proceedings of the Fifth Workshop on e-Commerce and NLP (ECNLP 5)
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F beygi-etal-2022-logical
%X In recent years, large pretrained models have been used in dialogue systems to improve rates of successful task completion. However, the lack of reasoning capabilities in dialogue platforms makes it difficult to provide relevant and fluent responses unless the designers of a conversational experience spend considerable time implementing these capabilities in external rule-based modules. In this work, we propose a novel method to fine-tune pretrained transformer models, such as RoBERTa and T5, to reason over a set of facts in a given dialogue context. Our method includes a synthetic data generation mechanism that helps the model learn logical relations, such as comparisons between lists of numerical values, inverse relations (and negation), inclusion and exclusion for categorical attributes, application of combinations of attributes over both numerical and categorical values, and spoken forms of numerical values, without the need for additional training data. We show that the transformer-based model can perform logical reasoning to answer questions when the dialogue context contains all the required information; when only partial information is available, it can extract appropriate constraints to pass to downstream components (e.g., a knowledge base). We observe that transformer-based models such as UnifiedQA-T5 can be fine-tuned to perform logical reasoning (such as comparisons of numerical and categorical attributes) over attributes seen at training time (e.g., accuracy of 90%+ for comparisons over fewer than kmax=5 values on a held-out test dataset).
%R 10.18653/v1/2022.ecnlp-1.10
%U https://aclanthology.org/2022.ecnlp-1.10
%U https://doi.org/10.18653/v1/2022.ecnlp-1.10
%P 68-79
Markdown (Informal)
[Logical Reasoning for Task Oriented Dialogue Systems](https://aclanthology.org/2022.ecnlp-1.10) (Beygi et al., ECNLP 2022)
ACL
Sajjad Beygi, Maryam Fazel-Zarandi, Alessandra Cervone, Prakash Krishnan, and Siddhartha Jonnalagadda. 2022. Logical Reasoning for Task Oriented Dialogue Systems. In Proceedings of the Fifth Workshop on e-Commerce and NLP (ECNLP 5), pages 68–79, Dublin, Ireland. Association for Computational Linguistics.
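
The abstract describes a synthetic data generation mechanism for teaching logical relations (numeric comparison, negation, inclusion/exclusion) to fine-tuned seq2seq models such as UnifiedQA-T5. As a rough illustration of what such generated training pairs might look like, here is a minimal Python sketch for the numeric-comparison case; the templates, the attribute name (`price`), and the input/target layout are assumptions for illustration, not the authors' released code.

```python
import random

ATTRIBUTE = "price"  # hypothetical attribute; the paper covers many attribute types
K_MAX = 5            # the abstract reports 90%+ accuracy for fewer than kmax=5 values

def make_comparison_example(k: int) -> dict:
    """Build one synthetic (input, target) pair asking for the smallest of k values."""
    items = [f"item{i}" for i in range(k)]
    values = random.sample(range(1, 100), k)
    # One flat fact per item, concatenated into a context string that a
    # seq2seq model (e.g., UnifiedQA-T5) can consume alongside the question.
    facts = " ".join(f"The {ATTRIBUTE} of {name} is {value}."
                     for name, value in zip(items, values))
    question = f"Which item has the smallest {ATTRIBUTE}?"
    answer = items[values.index(min(values))]
    return {"input": f"{question} \\n {facts}", "target": answer}

if __name__ == "__main__":
    random.seed(0)  # reproducible synthetic examples
    for _ in range(3):
        print(make_comparison_example(random.randint(2, K_MAX)))
```

Varying k up to kmax, mixing in negated and categorical variants, and emitting constraint strings instead of answers when the context holds only partial information would mirror the other relation types and the constraint-extraction behavior the abstract lists.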