@inproceedings{bapna-EtAl:2017:W17-55,
  author    = {Bapna, Ankur and Tur, Gokhan and Hakkani-Tur, Dilek and Heck, Larry},
  title     = {Sequential Dialogue Context Modeling for Spoken Language Understanding},
  booktitle = {Proceedings of the 18th Annual {SIGdial} Meeting on Discourse and Dialogue},
  month     = aug,
  year      = {2017},
  address   = {Saarbr{\"u}cken, Germany},
  publisher = {Association for Computational Linguistics},
  pages     = {103--114},
  abstract  = {Spoken Language Understanding (SLU) is a key component of goal oriented
    dialogue systems that would parse user utterances into semantic frame
    representations. Traditionally SLU does not utilize the dialogue history beyond
    the previous system turn and contextual ambiguities are resolved by the
    downstream components. In this paper, we explore novel approaches for modeling
    dialogue context in a recurrent neural network (RNN) based language
    understanding system. We propose the Sequential Dialogue Encoder Network, that
    allows encoding context from the dialogue history in chronological order. We
    compare the performance of our proposed architecture with two context models,
    one that uses just the previous turn context and another that encodes dialogue
    context in a memory network, but loses the order of utterances in the dialogue
    history. Experiments with a multi-domain dialogue dataset demonstrate that the
    proposed architecture results in reduced semantic frame error rates.},
  url       = {http://aclweb.org/anthology/W17-5514},
}

