@inproceedings{zhao-zhao-eskenazi:2017:Long,
  author    = {Zhao, Tiancheng and Zhao, Ran and Eskenazi, Maxine},
  title     = {Learning Discourse-level Diversity for Neural Dialog Models using Conditional Variational Autoencoders},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {654--664},
  abstract  = {While recent neural encoder-decoder models have shown great promise in modeling
	open-domain conversations, they often generate dull and generic responses.
	Unlike past work that has focused on diversifying the output of the decoder
	from word-level to alleviate this problem, we present a novel framework based
	on conditional variational autoencoders that capture the discourse-level
	diversity in the encoder. Our model uses latent variables to learn a
	distribution over potential conversational intents and generates diverse
	responses using only greedy decoders. We have further developed a novel variant
	that is integrated with linguistic prior knowledge for better performance.
	Finally, the training procedure is improved through introducing a bag-of-word
	loss. Our proposed models have been validated to generate significantly more
	diverse responses than baseline approaches and exhibit competence of
	discourse-level decision-making.},
  doi       = {10.18653/v1/P17-1061},
  url       = {http://aclweb.org/anthology/P17-1061},
}

