@inproceedings{dai-etal-2021-dialogue,
title = "Dialogue Response Generation via Contrastive Latent Representation Learning",
author = "Dai, Shuyang and
Wang, Guoyin and
Park, Sunghyun and
Lee, Sungjin",
editor = "Papangelis, Alexandros and
Budzianowski, Pawe{\l} and
Liu, Bing and
Nouri, Elnaz and
Rastogi, Abhinav and
Chen, Yun-Nung",
booktitle = "Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI",
month = nov,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.nlp4convai-1.18",
doi = "10.18653/v1/2021.nlp4convai-1.18",
pages = "189--197",
abstract = "Large-scale auto-regressive models have achieved great success in dialogue response generation, with the help of Transformer layers. However, these models do not learn a representative latent space of the sentence distribution, making it hard to control the generation. Recent works have tried on learning sentence representations using Transformer-based framework, but do not model the context-response relationship embedded in the dialogue datasets. In this work, we aim to construct a robust sentence representation learning model, that is specifically designed for dialogue response generation, with Transformer-based encoder-decoder structure. An utterance-level contrastive learning is proposed, encoding predictive information in each context representation for its corresponding response. Extensive experiments are conducted to verify the robustness of the proposed representation learning mechanism. By using both reference-based and reference-free evaluation metrics, we provide detailed analysis on the generated sentences, demonstrating the effectiveness of our proposed model.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="dai-etal-2021-dialogue">
<titleInfo>
<title>Dialogue Response Generation via Contrastive Latent Representation Learning</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shuyang</namePart>
<namePart type="family">Dai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guoyin</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sunghyun</namePart>
<namePart type="family">Park</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sungjin</namePart>
<namePart type="family">Lee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alexandros</namePart>
<namePart type="family">Papangelis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Paweł</namePart>
<namePart type="family">Budzianowski</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bing</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elnaz</namePart>
<namePart type="family">Nouri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Abhinav</namePart>
<namePart type="family">Rastogi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Large-scale auto-regressive models have achieved great success in dialogue response generation, with the help of Transformer layers. However, these models do not learn a representative latent space of the sentence distribution, making it hard to control the generation. Recent works have attempted to learn sentence representations with Transformer-based frameworks, but they do not model the context-response relationship embedded in dialogue datasets. In this work, we aim to construct a robust sentence representation learning model that is specifically designed for dialogue response generation, built on a Transformer-based encoder-decoder structure. We propose utterance-level contrastive learning, which encodes predictive information in each context representation for its corresponding response. Extensive experiments are conducted to verify the robustness of the proposed representation learning mechanism. Using both reference-based and reference-free evaluation metrics, we provide a detailed analysis of the generated sentences, demonstrating the effectiveness of our proposed model.</abstract>
<identifier type="citekey">dai-etal-2021-dialogue</identifier>
<identifier type="doi">10.18653/v1/2021.nlp4convai-1.18</identifier>
<location>
<url>https://aclanthology.org/2021.nlp4convai-1.18</url>
</location>
<part>
<date>2021-11</date>
<extent unit="page">
<start>189</start>
<end>197</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Dialogue Response Generation via Contrastive Latent Representation Learning
%A Dai, Shuyang
%A Wang, Guoyin
%A Park, Sunghyun
%A Lee, Sungjin
%Y Papangelis, Alexandros
%Y Budzianowski, Paweł
%Y Liu, Bing
%Y Nouri, Elnaz
%Y Rastogi, Abhinav
%Y Chen, Yun-Nung
%S Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI
%D 2021
%8 November
%I Association for Computational Linguistics
%C Online
%F dai-etal-2021-dialogue
%X Large-scale auto-regressive models have achieved great success in dialogue response generation, with the help of Transformer layers. However, these models do not learn a representative latent space of the sentence distribution, making it hard to control the generation. Recent works have attempted to learn sentence representations with Transformer-based frameworks, but they do not model the context-response relationship embedded in dialogue datasets. In this work, we aim to construct a robust sentence representation learning model that is specifically designed for dialogue response generation, built on a Transformer-based encoder-decoder structure. We propose utterance-level contrastive learning, which encodes predictive information in each context representation for its corresponding response. Extensive experiments are conducted to verify the robustness of the proposed representation learning mechanism. Using both reference-based and reference-free evaluation metrics, we provide a detailed analysis of the generated sentences, demonstrating the effectiveness of our proposed model.
%R 10.18653/v1/2021.nlp4convai-1.18
%U https://aclanthology.org/2021.nlp4convai-1.18
%U https://doi.org/10.18653/v1/2021.nlp4convai-1.18
%P 189-197
Markdown (Informal)
[Dialogue Response Generation via Contrastive Latent Representation Learning](https://aclanthology.org/2021.nlp4convai-1.18) (Dai et al., NLP4ConvAI 2021)
ACL
Shuyang Dai, Guoyin Wang, Sunghyun Park, and Sungjin Lee. 2021. [Dialogue Response Generation via Contrastive Latent Representation Learning](https://aclanthology.org/2021.nlp4convai-1.18). In *Proceedings of the 3rd Workshop on Natural Language Processing for Conversational AI*, pages 189–197, Online. Association for Computational Linguistics.
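
For readers who want a concrete sense of the "utterance-level contrastive learning" the abstract describes, the sketch below shows one common way such an objective is written: an InfoNCE-style loss that pairs each dialogue context with its own response and treats the other responses in the batch as negatives. This is an illustrative sketch only, not the authors' implementation; the encoder choice, in-batch negative sampling, and the temperature value are all assumptions.

```python
# Illustrative sketch of an utterance-level contrastive (InfoNCE-style)
# objective over context/response embedding pairs. NOT the paper's code:
# the encoder, batch construction, and temperature are assumptions.
import torch
import torch.nn.functional as F

def utterance_contrastive_loss(context_emb: torch.Tensor,
                               response_emb: torch.Tensor,
                               temperature: float = 0.1) -> torch.Tensor:
    """context_emb, response_emb: (batch, dim) sentence embeddings,
    e.g. pooled hidden states from a Transformer encoder (assumed).
    Row i of each tensor comes from the same context-response pair;
    other rows in the batch serve as in-batch negatives."""
    c = F.normalize(context_emb, dim=-1)
    r = F.normalize(response_emb, dim=-1)
    logits = c @ r.t() / temperature                     # (batch, batch) similarities
    targets = torch.arange(c.size(0), device=c.device)   # diagonal entries are positives
    return F.cross_entropy(logits, targets)

# Usage: loss = utterance_contrastive_loss(ctx, rsp); loss.backward()
```

The key property this loss encodes, matching the abstract's description, is that each context representation must carry enough predictive information to single out its corresponding response among the alternatives in the batch.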