@inproceedings{ortega-vu-2017-neural,
title = "Neural-based Context Representation Learning for Dialog Act Classification",
author = "Ortega, Daniel and
Vu, Ngoc Thang",
editor = "Jokinen, Kristiina and
Stede, Manfred and
DeVault, David and
Louis, Annie",
booktitle = "Proceedings of the 18th Annual {SIG}dial Meeting on Discourse and Dialogue",
month = aug,
year = "2017",
address = "Saarbr{\"u}cken, Germany",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-5530",
doi = "10.18653/v1/W17-5530",
pages = "247--252",
abstract = "We explore context representation learning methods in neural-based models for dialog act classification. We propose and compare extensively different methods which combine recurrent neural network architectures and attention mechanisms (AMs) at different context levels. Our experimental results on two benchmark datasets show consistent improvements compared to the models without contextual information and reveal that the most suitable AM in the architecture depends on the nature of the dataset.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ortega-vu-2017-neural">
<titleInfo>
<title>Neural-based Context Representation Learning for Dialog Act Classification</title>
</titleInfo>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Ortega</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ngoc</namePart>
<namePart type="given">Thang</namePart>
<namePart type="family">Vu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kristiina</namePart>
<namePart type="family">Jokinen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Manfred</namePart>
<namePart type="family">Stede</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">DeVault</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Annie</namePart>
<namePart type="family">Louis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Saarbrücken, Germany</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We explore context representation learning methods in neural-based models for dialog act classification. We propose and compare extensively different methods which combine recurrent neural network architectures and attention mechanisms (AMs) at different context levels. Our experimental results on two benchmark datasets show consistent improvements compared to the models without contextual information and reveal that the most suitable AM in the architecture depends on the nature of the dataset.</abstract>
<identifier type="citekey">ortega-vu-2017-neural</identifier>
<identifier type="doi">10.18653/v1/W17-5530</identifier>
<location>
<url>https://aclanthology.org/W17-5530</url>
</location>
<part>
<date>2017-08</date>
<extent unit="page">
<start>247</start>
<end>252</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Neural-based Context Representation Learning for Dialog Act Classification
%A Ortega, Daniel
%A Vu, Ngoc Thang
%Y Jokinen, Kristiina
%Y Stede, Manfred
%Y DeVault, David
%Y Louis, Annie
%S Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue
%D 2017
%8 August
%I Association for Computational Linguistics
%C Saarbrücken, Germany
%F ortega-vu-2017-neural
%X We explore context representation learning methods in neural-based models for dialog act classification. We propose and compare extensively different methods which combine recurrent neural network architectures and attention mechanisms (AMs) at different context levels. Our experimental results on two benchmark datasets show consistent improvements compared to the models without contextual information and reveal that the most suitable AM in the architecture depends on the nature of the dataset.
%R 10.18653/v1/W17-5530
%U https://aclanthology.org/W17-5530
%U https://doi.org/10.18653/v1/W17-5530
%P 247-252
Markdown (Informal)
[Neural-based Context Representation Learning for Dialog Act Classification](https://aclanthology.org/W17-5530) (Ortega & Vu, SIGDIAL 2017)
ACL