@inproceedings{serban-EtAl:2017:EMNLP2017,
  author    = {Serban, Iulian Vlad and Ororbia, Alexander G. and Pineau, Joelle and Courville, Aaron},
  title     = {Piecewise Latent Variables for Neural Variational Text Processing},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {422--432},
  doi       = {10.18653/v1/D17-1043},
  url       = {https://www.aclweb.org/anthology/D17-1043},
  abstract  = {Advances in neural variational inference have facilitated the learning of
	powerful directed graphical models with continuous latent variables, such as
	variational autoencoders. The hope is that such models will learn to represent
	rich, multi-modal latent factors in real-world data, such as natural language
	text. However, current models often assume simplistic priors on the latent
	variables---such as the uni-modal Gaussian distribution---which are incapable
	of representing complex latent factors efficiently. To overcome this
	restriction, we propose the simple, but highly flexible, piecewise constant
	distribution. This distribution has the capacity to represent an exponential
	number of modes of a latent target distribution, while remaining mathematically
	tractable. Our results demonstrate that incorporating this new latent
	distribution into different models yields substantial improvements in natural
	language processing tasks such as document modeling and natural language
	generation for dialogue.},
}

