@inproceedings{finch-choi-2024-diverse,
  title     = {Diverse and Effective Synthetic Data Generation for Adaptable Zero-Shot Dialogue State Tracking},
  author    = {Finch, James D. and Choi, Jinho D.},
  editor    = {Al-Onaizan, Yaser and Bansal, Mohit and Chen, Yun-Nung},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2024},
  month     = nov,
  year      = {2024},
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.findings-emnlp.731/},
  doi       = {10.18653/v1/2024.findings-emnlp.731},
  pages     = {12527--12544},
  abstract  = {We demonstrate substantial performance gains in zero-shot dialogue state tracking (DST) by enhancing training data diversity through synthetic data generation. Existing DST datasets are severely limited in the number of application domains and slot types they cover due to the high costs of data collection, restricting their adaptability to new domains. This work addresses this challenge with a novel, fully automatic data generation approach that creates synthetic zero-shot DST datasets. Distinguished from previous methods, our approach can generate dialogues across a massive range of application domains, complete with silver-standard dialogue state annotations and slot descriptions. This technique is used to create the D0T dataset for training zero-shot DST models, encompassing an unprecedented 1,000+ domains. Experiments on the MultiWOZ benchmark show that training models on diverse synthetic data improves Joint Goal Accuracy by 6.7\%, achieving results competitive with models 13.5 times larger than ours.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="finch-choi-2024-diverse">
<titleInfo>
<title>Diverse and Effective Synthetic Data Generation for Adaptable Zero-Shot Dialogue State Tracking</title>
</titleInfo>
<name type="personal">
<namePart type="given">James</namePart>
<namePart type="given">D</namePart>
<namePart type="family">Finch</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jinho</namePart>
<namePart type="given">D</namePart>
<namePart type="family">Choi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Findings of the Association for Computational Linguistics: EMNLP 2024</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yaser</namePart>
<namePart type="family">Al-Onaizan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, Florida, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We demonstrate substantial performance gains in zero-shot dialogue state tracking (DST) by enhancing training data diversity through synthetic data generation. Existing DST datasets are severely limited in the number of application domains and slot types they cover due to the high costs of data collection, restricting their adaptability to new domains. This work addresses this challenge with a novel, fully automatic data generation approach that creates synthetic zero-shot DST datasets. Distinguished from previous methods, our approach can generate dialogues across a massive range of application domains, complete with silver-standard dialogue state annotations and slot descriptions. This technique is used to create the D0T dataset for training zero-shot DST models, encompassing an unprecedented 1,000+ domains. Experiments on the MultiWOZ benchmark show that training models on diverse synthetic data improves Joint Goal Accuracy by 6.7%, achieving results competitive with models 13.5 times larger than ours.</abstract>
<identifier type="citekey">finch-choi-2024-diverse</identifier>
<identifier type="doi">10.18653/v1/2024.findings-emnlp.731</identifier>
<location>
<url>https://aclanthology.org/2024.findings-emnlp.731/</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>12527</start>
<end>12544</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Diverse and Effective Synthetic Data Generation for Adaptable Zero-Shot Dialogue State Tracking
%A Finch, James D.
%A Choi, Jinho D.
%Y Al-Onaizan, Yaser
%Y Bansal, Mohit
%Y Chen, Yun-Nung
%S Findings of the Association for Computational Linguistics: EMNLP 2024
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, Florida, USA
%F finch-choi-2024-diverse
%X We demonstrate substantial performance gains in zero-shot dialogue state tracking (DST) by enhancing training data diversity through synthetic data generation. Existing DST datasets are severely limited in the number of application domains and slot types they cover due to the high costs of data collection, restricting their adaptability to new domains. This work addresses this challenge with a novel, fully automatic data generation approach that creates synthetic zero-shot DST datasets. Distinguished from previous methods, our approach can generate dialogues across a massive range of application domains, complete with silver-standard dialogue state annotations and slot descriptions. This technique is used to create the D0T dataset for training zero-shot DST models, encompassing an unprecedented 1,000+ domains. Experiments on the MultiWOZ benchmark show that training models on diverse synthetic data improves Joint Goal Accuracy by 6.7%, achieving results competitive with models 13.5 times larger than ours.
%R 10.18653/v1/2024.findings-emnlp.731
%U https://aclanthology.org/2024.findings-emnlp.731/
%U https://doi.org/10.18653/v1/2024.findings-emnlp.731
%P 12527-12544
Markdown (Informal)
[Diverse and Effective Synthetic Data Generation for Adaptable Zero-Shot Dialogue State Tracking](https://aclanthology.org/2024.findings-emnlp.731/) (Finch & Choi, Findings 2024)
ACL