@InProceedings{tran-zukerman-haffari:2017:EACLlong,
  author    = {Tran, Quan Hung  and  Zukerman, Ingrid  and  Haffari, Gholamreza},
  title     = {A Hierarchical Neural Model for Learning Sequences of Dialogue Acts},
  booktitle = {Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 1, Long Papers},
  month     = apr,
  year      = {2017},
  address   = {Valencia, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {428--437},
  abstract  = {We propose a novel hierarchical Recurrent Neural Network (RNN) for learning
	sequences of Dialogue Acts (DAs). The input in this task is a sequence of
	utterances (i.e., conversational contributions) comprising a sequence of
	tokens, and the output is a sequence of DA labels (one label per utterance).
	Our model leverages the hierarchical nature of dialogue data by using two
	nested RNNs that capture long-range dependencies at the dialogue level and the
	utterance level. This model is combined with an attention mechanism that
	focuses on salient tokens in utterances. Our experimental results show that our
	model outperforms strong baselines on two popular datasets, Switchboard and
	MapTask; and our detailed empirical analysis highlights the impact of each
	aspect of our model.},
  url       = {http://www.aclweb.org/anthology/E17-1041}
}

