@inproceedings{veron-etal-2022-attention,
title = "Attention Modulation for Zero-Shot Cross-Domain Dialogue State Tracking",
author = "Veron, Mathilde and
Galibert, Olivier and
Bernard, Guillaume and
Rosset, Sophie",
editor = "Braud, Chloe and
Hardmeier, Christian and
Li, Junyi Jessy and
Loaiciga, Sharid and
Strube, Michael and
Zeldes, Amir",
booktitle = "Proceedings of the 3rd Workshop on Computational Approaches to Discourse",
month = oct,
year = "2022",
address = "Gyeongju, Republic of Korea and Online",
publisher = "International Conference on Computational Linguistics",
url = "https://aclanthology.org/2022.codi-1.11",
pages = "86--91",
abstract = "Dialog state tracking (DST) is a core step for task-oriented dialogue systems aiming to track the user{'}s current goal during a dialogue. Recently a special focus has been put on applying existing DST models to new domains, in other words performing zero-shot cross-domain transfer. While recent state-of-the-art models leverage large pre-trained language models, no work has been made on understanding and improving the results of first developed zero-shot models like SUMBT. In this paper, we thus propose to improve SUMBT zero-shot results on MultiWOZ by using attention modulation during inference. This method improves SUMBT zero-shot results significantly on two domains and does not worsen the initial performance with the great advantage of needing no additional training.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="veron-etal-2022-attention">
<titleInfo>
<title>Attention Modulation for Zero-Shot Cross-Domain Dialogue State Tracking</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mathilde</namePart>
<namePart type="family">Veron</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Olivier</namePart>
<namePart type="family">Galibert</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guillaume</namePart>
<namePart type="family">Bernard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sophie</namePart>
<namePart type="family">Rosset</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Computational Approaches to Discourse</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chloe</namePart>
<namePart type="family">Braud</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christian</namePart>
<namePart type="family">Hardmeier</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Junyi</namePart>
<namePart type="given">Jessy</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sharid</namePart>
<namePart type="family">Loaiciga</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Strube</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amir</namePart>
<namePart type="family">Zeldes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>International Conference on Computational Linguistics</publisher>
<place>
<placeTerm type="text">Gyeongju, Republic of Korea and Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>Dialogue state tracking (DST) is a core step for task-oriented dialogue systems, aiming to track the user’s current goal during a dialogue. Recently, a special focus has been put on applying existing DST models to new domains, in other words, on performing zero-shot cross-domain transfer. While recent state-of-the-art models leverage large pre-trained language models, no work has been done on understanding and improving the results of early zero-shot models such as SUMBT. In this paper, we thus propose to improve SUMBT’s zero-shot results on MultiWOZ by using attention modulation during inference. This method significantly improves SUMBT’s zero-shot results on two domains and does not worsen the initial performance, with the great advantage of requiring no additional training.</abstract>
<identifier type="citekey">veron-etal-2022-attention</identifier>
<location>
<url>https://aclanthology.org/2022.codi-1.11</url>
</location>
<part>
<date>2022-10</date>
<extent unit="page">
<start>86</start>
<end>91</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Attention Modulation for Zero-Shot Cross-Domain Dialogue State Tracking
%A Veron, Mathilde
%A Galibert, Olivier
%A Bernard, Guillaume
%A Rosset, Sophie
%Y Braud, Chloe
%Y Hardmeier, Christian
%Y Li, Junyi Jessy
%Y Loaiciga, Sharid
%Y Strube, Michael
%Y Zeldes, Amir
%S Proceedings of the 3rd Workshop on Computational Approaches to Discourse
%D 2022
%8 October
%I International Conference on Computational Linguistics
%C Gyeongju, Republic of Korea and Online
%F veron-etal-2022-attention
%X Dialogue state tracking (DST) is a core step for task-oriented dialogue systems, aiming to track the user’s current goal during a dialogue. Recently, a special focus has been put on applying existing DST models to new domains, in other words, on performing zero-shot cross-domain transfer. While recent state-of-the-art models leverage large pre-trained language models, no work has been done on understanding and improving the results of early zero-shot models such as SUMBT. In this paper, we thus propose to improve SUMBT’s zero-shot results on MultiWOZ by using attention modulation during inference. This method significantly improves SUMBT’s zero-shot results on two domains and does not worsen the initial performance, with the great advantage of requiring no additional training.
%U https://aclanthology.org/2022.codi-1.11
%P 86-91
Markdown (Informal)
[Attention Modulation for Zero-Shot Cross-Domain Dialogue State Tracking](https://aclanthology.org/2022.codi-1.11) (Veron et al., CODI 2022)
ACL
Mathilde Veron, Olivier Galibert, Guillaume Bernard, and Sophie Rosset. 2022. Attention Modulation for Zero-Shot Cross-Domain Dialogue State Tracking. In Proceedings of the 3rd Workshop on Computational Approaches to Discourse, pages 86–91, Gyeongju, Republic of Korea and Online. International Conference on Computational Linguistics.
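
For intuition about the method named in the abstract, the sketch below shows one way inference-time attention modulation can be realized on top of standard scaled dot-product attention: attention weights over selected key positions are re-scaled and renormalized, with no parameter updates. This is a minimal sketch under assumed details; the function `modulated_attention`, the multiplicative `gate`, and the blending strength `alpha` are illustrative and are not the authors' implementation or the exact mechanism applied to SUMBT.

```python
# Minimal sketch of inference-time attention modulation (assumed form, not the
# paper's exact mechanism): re-weight attention over selected key positions
# and renormalize, with no additional training.
import torch
import torch.nn.functional as F

def modulated_attention(q, k, v, gate, alpha=1.0):
    # q: (batch, heads, q_len, d) queries, e.g. slot representations in SUMBT
    # k, v: (batch, heads, k_len, d) keys/values, e.g. dialogue-history tokens
    # gate: (k_len,) non-negative factors; 1.0 leaves a position unchanged,
    #       >1.0 boosts it, <1.0 dampens it (hypothetical modulation signal)
    # alpha: blend between unmodulated (0.0) and fully modulated (1.0) weights
    d = q.size(-1)
    scores = q @ k.transpose(-2, -1) / d ** 0.5        # (b, h, q_len, k_len)
    weights = F.softmax(scores, dim=-1)                # standard attention
    gated = weights * gate                             # modulate key positions
    gated = gated / gated.sum(dim=-1, keepdim=True)    # renormalize to sum to 1
    weights = (1.0 - alpha) * weights + alpha * gated  # interpolate
    return weights @ v

# Toy usage: boost the last two history positions for every query.
b, h, q_len, k_len, d = 1, 4, 3, 10, 16
q = torch.randn(b, h, q_len, d)
k = torch.randn(b, h, k_len, d)
v = torch.randn(b, h, k_len, d)
gate = torch.ones(k_len)
gate[-2:] = 2.0
out = modulated_attention(q, k, v, gate, alpha=0.5)
print(out.shape)  # torch.Size([1, 4, 3, 16])
```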