@inproceedings{ma-etal-2019-triplenet,
  title     = {{T}riple{N}et: Triple Attention Network for Multi-Turn Response Selection in Retrieval-Based Chatbots},
  author    = {Ma, Wentao and
               Cui, Yiming and
               Shao, Nan and
               He, Su and
               Zhang, Wei-Nan and
               Liu, Ting and
               Wang, Shijin and
               Hu, Guoping},
  editor    = {Bansal, Mohit and
               Villavicencio, Aline},
  booktitle = {Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)},
  month     = nov,
  year      = {2019},
  address   = {Hong Kong, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/K19-1069},
  doi       = {10.18653/v1/K19-1069},
  pages     = {737--746},
  abstract  = {We consider the importance of different utterances in the context for selecting the response usually depends on the current query. In this paper, we propose the model TripleNet to fully model the task with the triple {\textless}context, query, response{\textgreater} instead of {\textless}context, response {\textgreater} in previous works. The heart of TripleNet is a novel attention mechanism named triple attention to model the relationships within the triple at four levels. The new mechanism updates the representation of each element based on the attention with the other two concurrently and symmetrically. We match the triple {\textless}C, Q, R{\textgreater} centered on the response from char to context level for prediction. Experimental results on two large-scale multi-turn response selection datasets show that the proposed model can significantly outperform the state-of-the-art methods.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ma-etal-2019-triplenet">
<titleInfo>
<title>TripleNet: Triple Attention Network for Multi-Turn Response Selection in Retrieval-Based Chatbots</title>
</titleInfo>
<name type="personal">
<namePart type="given">Wentao</namePart>
<namePart type="family">Ma</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yiming</namePart>
<namePart type="family">Cui</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nan</namePart>
<namePart type="family">Shao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Su</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wei-Nan</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ting</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shijin</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guoping</namePart>
<namePart type="family">Hu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mohit</namePart>
<namePart type="family">Bansal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aline</namePart>
<namePart type="family">Villavicencio</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hong Kong, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We consider the importance of different utterances in the context for selecting the response usually depends on the current query. In this paper, we propose the model TripleNet to fully model the task with the triple &lt;context, query, response&gt; instead of &lt;context, response&gt; in previous works. The heart of TripleNet is a novel attention mechanism named triple attention to model the relationships within the triple at four levels. The new mechanism updates the representation of each element based on the attention with the other two concurrently and symmetrically. We match the triple &lt;C, Q, R&gt; centered on the response from char to context level for prediction. Experimental results on two large-scale multi-turn response selection datasets show that the proposed model can significantly outperform the state-of-the-art methods.</abstract>
<identifier type="citekey">ma-etal-2019-triplenet</identifier>
<identifier type="doi">10.18653/v1/K19-1069</identifier>
<location>
<url>https://aclanthology.org/K19-1069</url>
</location>
<part>
<date>2019-11</date>
<extent unit="page">
<start>737</start>
<end>746</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T TripleNet: Triple Attention Network for Multi-Turn Response Selection in Retrieval-Based Chatbots
%A Ma, Wentao
%A Cui, Yiming
%A Shao, Nan
%A He, Su
%A Zhang, Wei-Nan
%A Liu, Ting
%A Wang, Shijin
%A Hu, Guoping
%Y Bansal, Mohit
%Y Villavicencio, Aline
%S Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)
%D 2019
%8 November
%I Association for Computational Linguistics
%C Hong Kong, China
%F ma-etal-2019-triplenet
%X We consider the importance of different utterances in the context for selecting the response usually depends on the current query. In this paper, we propose the model TripleNet to fully model the task with the triple <context, query, response> instead of <context, response> in previous works. The heart of TripleNet is a novel attention mechanism named triple attention to model the relationships within the triple at four levels. The new mechanism updates the representation of each element based on the attention with the other two concurrently and symmetrically. We match the triple <C, Q, R> centered on the response from char to context level for prediction. Experimental results on two large-scale multi-turn response selection datasets show that the proposed model can significantly outperform the state-of-the-art methods.
%R 10.18653/v1/K19-1069
%U https://aclanthology.org/K19-1069
%U https://doi.org/10.18653/v1/K19-1069
%P 737-746
Markdown (Informal)
[TripleNet: Triple Attention Network for Multi-Turn Response Selection in Retrieval-Based Chatbots](https://aclanthology.org/K19-1069) (Ma et al., CoNLL 2019)
ACL
- Wentao Ma, Yiming Cui, Nan Shao, Su He, Wei-Nan Zhang, Ting Liu, Shijin Wang, and Guoping Hu. 2019. TripleNet: Triple Attention Network for Multi-Turn Response Selection in Retrieval-Based Chatbots. In Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 737–746, Hong Kong, China. Association for Computational Linguistics.