@inproceedings{lee-etal-2022-randomized,
title = "A Randomized Link Transformer for Diverse Open-Domain Dialogue Generation",
author = "Lee, Jing Yang and
Lee, Kong Aik and
Gan, Woon Seng",
editor = "Liu, Bing and
Papangelis, Alexandros and
Ultes, Stefan and
Rastogi, Abhinav and
Chen, Yun-Nung and
Spithourakis, Georgios and
Nouri, Elnaz and
Shi, Weiyan",
booktitle = "Proceedings of the 4th Workshop on NLP for Conversational AI",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.nlp4convai-1.1",
doi = "10.18653/v1/2022.nlp4convai-1.1",
pages = "1--11",
abstract = "A major issue in open-domain dialogue generation is the agent{'}s tendency to generate repetitive and generic responses. The lack of response diversity has been addressed in recent years via the use of latent variable models, such as the Conditional Variational Auto-Encoder (CVAE), which typically involve learning a latent Gaussian distribution over potential response intents. However, due to latent variable collapse, training latent variable dialogue models is notoriously complex, requiring substantial modification to the standard training process and loss function. Other approaches proposed to improve response diversity also largely entail a significant increase in training complexity. Hence, this paper proposes a Randomized Link (RL) Transformer as an alternative to the latent variable models. The RL Transformer does not require any additional enhancements to the training process or loss function. Empirical results show that, when it comes to response diversity, the RL Transformer achieved performance comparable to that of latent variable models.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lee-etal-2022-randomized">
<titleInfo>
<title>A Randomized Link Transformer for Diverse Open-Domain Dialogue Generation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Jing</namePart>
<namePart type="given">Yang</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kong</namePart>
<namePart type="given">Aik</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Woon</namePart>
<namePart type="given">Seng</namePart>
<namePart type="family">Gan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 4th Workshop on NLP for Conversational AI</title>
</titleInfo>
<name type="personal">
<namePart type="given">Bing</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Alexandros</namePart>
<namePart type="family">Papangelis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Stefan</namePart>
<namePart type="family">Ultes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhinav</namePart>
<namePart type="family">Rastogi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Georgios</namePart>
<namePart type="family">Spithourakis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elnaz</namePart>
<namePart type="family">Nouri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Weiyan</namePart>
<namePart type="family">Shi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dublin, Ireland</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>A major issue in open-domain dialogue generation is the agent’s tendency to generate repetitive and generic responses. The lack of response diversity has been addressed in recent years via the use of latent variable models, such as the Conditional Variational Auto-Encoder (CVAE), which typically involve learning a latent Gaussian distribution over potential response intents. However, due to latent variable collapse, training latent variable dialogue models is notoriously complex, requiring substantial modification to the standard training process and loss function. Other approaches proposed to improve response diversity also largely entail a significant increase in training complexity. Hence, this paper proposes a Randomized Link (RL) Transformer as an alternative to the latent variable models. The RL Transformer does not require any additional enhancements to the training process or loss function. Empirical results show that, when it comes to response diversity, the RL Transformer achieved performance comparable to that of latent variable models.</abstract>
<identifier type="citekey">lee-etal-2022-randomized</identifier>
<identifier type="doi">10.18653/v1/2022.nlp4convai-1.1</identifier>
<location>
<url>https://aclanthology.org/2022.nlp4convai-1.1</url>
</location>
<part>
<date>2022-05</date>
<extent unit="page">
<start>1</start>
<end>11</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T A Randomized Link Transformer for Diverse Open-Domain Dialogue Generation
%A Lee, Jing Yang
%A Lee, Kong Aik
%A Gan, Woon Seng
%Y Liu, Bing
%Y Papangelis, Alexandros
%Y Ultes, Stefan
%Y Rastogi, Abhinav
%Y Chen, Yun-Nung
%Y Spithourakis, Georgios
%Y Nouri, Elnaz
%Y Shi, Weiyan
%S Proceedings of the 4th Workshop on NLP for Conversational AI
%D 2022
%8 May
%I Association for Computational Linguistics
%C Dublin, Ireland
%F lee-etal-2022-randomized
%X A major issue in open-domain dialogue generation is the agent’s tendency to generate repetitive and generic responses. The lack of response diversity has been addressed in recent years via the use of latent variable models, such as the Conditional Variational Auto-Encoder (CVAE), which typically involve learning a latent Gaussian distribution over potential response intents. However, due to latent variable collapse, training latent variable dialogue models is notoriously complex, requiring substantial modification to the standard training process and loss function. Other approaches proposed to improve response diversity also largely entail a significant increase in training complexity. Hence, this paper proposes a Randomized Link (RL) Transformer as an alternative to the latent variable models. The RL Transformer does not require any additional enhancements to the training process or loss function. Empirical results show that, when it comes to response diversity, the RL Transformer achieved performance comparable to that of latent variable models.
%R 10.18653/v1/2022.nlp4convai-1.1
%U https://aclanthology.org/2022.nlp4convai-1.1
%U https://doi.org/10.18653/v1/2022.nlp4convai-1.1
%P 1-11
Markdown (Informal)
[A Randomized Link Transformer for Diverse Open-Domain Dialogue Generation](https://aclanthology.org/2022.nlp4convai-1.1) (Lee et al., NLP4ConvAI 2022)
ACL