@inproceedings{li-jurafsky:2017:EMNLP2017,
  author    = {Li, Jiwei and Jurafsky, Dan},
  title     = {Neural Net Models of Open-domain Discourse Coherence},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {198--209},
  abstract  = {Discourse coherence is strongly associated with text quality,
	making it important to natural language generation and understanding.
	Yet existing models of coherence focus on measuring individual aspects of
	coherence
	(lexical overlap, rhetorical structure, entity centering) in narrow domains.
	In this paper, we describe domain-independent neural models
	of discourse coherence that are capable of measuring multiple aspects of
	coherence
	in existing sentences and can maintain coherence while generating new
	sentences.
	We study both
	discriminative models that learn to distinguish coherent from incoherent
	discourse,
	and generative models that produce coherent text,
	including a novel neural latent-variable Markovian generative model that
	captures the latent discourse dependencies between sentences in a text.
	Our work achieves state-of-the-art performance on multiple coherence
	evaluations,
	and marks an initial step in generating coherent texts given discourse
	contexts.},
  doi       = {10.18653/v1/D17-1019},
  url       = {https://www.aclweb.org/anthology/D17-1019},
}

