@inproceedings{kulhanek-etal-2021-augpt,
    title = "{AuGPT}: Auxiliary Tasks and Data Augmentation for End-To-End Dialogue with Pre-Trained Language Models",
    author = "Kulh{\'a}nek, Jon{\'a}{\v{s}} and
      Hude{\v{c}}ek, Vojt{\v{e}}ch and
      Nekvinda, Tom{\'a}{\v{s}} and
      Du{\v{s}}ek, Ond{\v{r}}ej",
    editor = "Papangelis, Alexandros and
      Budzianowski, Pawe{\l} and
      Liu, Bing and
      Nouri, Elnaz and
      Rastogi, Abhinav and
      Chen, Yun-Nung",
    booktitle = "Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI",
    month = nov,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.nlp4convai-1.19/",
    doi = "10.18653/v1/2021.nlp4convai-1.19",
    pages = "198--210",
    abstract = "Attention-based pre-trained language models such as GPT-2 brought considerable progress to end-to-end dialogue modelling. However, they also present considerable risks for task-oriented dialogue, such as lack of knowledge grounding or diversity. To address these issues, we introduce modified training objectives for language model finetuning, and we employ massive data augmentation via back-translation to increase the diversity of the training data. We further examine the possibilities of combining data from multiple sources to improve performance on the target dataset. We carefully evaluate our contributions with both human and automatic methods. Our model substantially outperforms the baseline on the MultiWOZ data and shows competitive performance with the state of the art in both automatic and human evaluation."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="kulhanek-etal-2021-augpt">
<titleInfo>
<title>AuGPT: Auxiliary Tasks and Data Augmentation for End-To-End Dialogue with Pre-Trained Language Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jonáš</namePart>
<namePart type="family">Kulhánek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vojtěch</namePart>
<namePart type="family">Hudeček</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tomáš</namePart>
<namePart type="family">Nekvinda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ondřej</namePart>
<namePart type="family">Dušek</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexandros</namePart>
<namePart type="family">Papangelis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paweł</namePart>
<namePart type="family">Budzianowski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bing</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elnaz</namePart>
<namePart type="family">Nouri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhinav</namePart>
<namePart type="family">Rastogi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Attention-based pre-trained language models such as GPT-2 brought considerable progress to end-to-end dialogue modelling. However, they also present considerable risks for task-oriented dialogue, such as lack of knowledge grounding or diversity. To address these issues, we introduce modified training objectives for language model finetuning, and we employ massive data augmentation via back-translation to increase the diversity of the training data. We further examine the possibilities of combining data from multiple sources to improve performance on the target dataset. We carefully evaluate our contributions with both human and automatic methods. Our model substantially outperforms the baseline on the MultiWOZ data and shows competitive performance with the state of the art in both automatic and human evaluation.</abstract>
<identifier type="citekey">kulhanek-etal-2021-augpt</identifier>
<identifier type="doi">10.18653/v1/2021.nlp4convai-1.19</identifier>
<location>
<url>https://aclanthology.org/2021.nlp4convai-1.19/</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>198</start>
<end>210</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T AuGPT: Auxiliary Tasks and Data Augmentation for End-To-End Dialogue with Pre-Trained Language Models
%A Kulhánek, Jonáš
%A Hudeček, Vojtěch
%A Nekvinda, Tomáš
%A Dušek, Ondřej
%Y Papangelis, Alexandros
%Y Budzianowski, Paweł
%Y Liu, Bing
%Y Nouri, Elnaz
%Y Rastogi, Abhinav
%Y Chen, Yun-Nung
%S Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online
%F kulhanek-etal-2021-augpt
%X Attention-based pre-trained language models such as GPT-2 brought considerable progress to end-to-end dialogue modelling. However, they also present considerable risks for task-oriented dialogue, such as lack of knowledge grounding or diversity. To address these issues, we introduce modified training objectives for language model finetuning, and we employ massive data augmentation via back-translation to increase the diversity of the training data. We further examine the possibilities of combining data from multiple sources to improve performance on the target dataset. We carefully evaluate our contributions with both human and automatic methods. Our model substantially outperforms the baseline on the MultiWOZ data and shows competitive performance with the state of the art in both automatic and human evaluation.
%R 10.18653/v1/2021.nlp4convai-1.19
%U https://aclanthology.org/2021.nlp4convai-1.19/
%U https://doi.org/10.18653/v1/2021.nlp4convai-1.19
%P 198-210
Markdown (Informal)
[AuGPT: Auxiliary Tasks and Data Augmentation for End-To-End Dialogue with Pre-Trained Language Models](https://aclanthology.org/2021.nlp4convai-1.19/) (Kulhánek et al., NLP4ConvAI 2021)
ACL
Jonáš Kulhánek, Vojtěch Hudeček, Tomáš Nekvinda, and Ondřej Dušek. 2021. [AuGPT: Auxiliary Tasks and Data Augmentation for End-To-End Dialogue with Pre-Trained Language Models](https://aclanthology.org/2021.nlp4convai-1.19/). In *Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI*, pages 198–210, Online. Association for Computational Linguistics.
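The data augmentation named in the abstract is back-translation: paraphrasing a training utterance by translating it into a pivot language and back into English. Below is a minimal sketch of that idea, assuming publicly available Marian MT checkpoints as the pivot translators; the model names, pivot language (German), and decoding settings are illustrative assumptions, not the authors' pipeline.

```python
# Back-translation sketch: paraphrase English utterances via an
# en -> de -> en round trip. Illustrates the augmentation idea from
# the abstract; NOT the AuGPT authors' actual pipeline.
from transformers import MarianMTModel, MarianTokenizer

def load(name):
    return MarianTokenizer.from_pretrained(name), MarianMTModel.from_pretrained(name)

en_de_tok, en_de = load("Helsinki-NLP/opus-mt-en-de")  # English -> German
de_en_tok, de_en = load("Helsinki-NLP/opus-mt-de-en")  # German -> English

def translate(texts, tok, model):
    # Tokenize a batch, translate with beam search, decode back to strings.
    batch = tok(texts, return_tensors="pt", padding=True, truncation=True)
    out = model.generate(**batch, num_beams=4, max_length=128)
    return tok.batch_decode(out, skip_special_tokens=True)

def back_translate(utterances):
    """Return paraphrases of `utterances` via the pivot language."""
    pivot = translate(utterances, en_de_tok, en_de)
    return translate(pivot, de_en_tok, de_en)

if __name__ == "__main__":
    print(back_translate(["i am looking for a cheap restaurant in the centre"]))
```

The round trip tends to preserve meaning while varying surface form, which is what makes it useful for increasing the lexical diversity of task-oriented training data.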