@inproceedings{xu-EtAl:2017:EMNLP2017,
  author    = {Xu, Zhen and Liu, Bingquan and Wang, Baoxun and Sun, Chengjie and Wang, Xiaolong and Wang, Zhuoran and Qi, Chao},
  title     = {Neural Response Generation via {GAN} with an Approximate Embedding Layer},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {617--626},
  doi       = {10.18653/v1/D17-1065},
  url       = {https://www.aclweb.org/anthology/D17-1065},
  abstract  = {This paper presents a Generative Adversarial Network (GAN) to model single-turn
    short-text conversations, which trains a sequence-to-sequence (Seq2Seq) network
    for response generation simultaneously with a discriminative classifier that
    measures the differences between human-produced responses and machine-generated
    ones. In addition, the proposed method introduces an approximate embedding
    layer to solve the non-differentiable problem caused by the sampling-based
    output decoding procedure in the Seq2Seq generative model. The GAN setup
    provides an effective way to avoid noninformative responses (a.k.a ``safe
    responses''), which are frequently observed in traditional neural response
    generators.
    The experimental results show that the proposed approach significantly
    outperforms existing neural response generation models in diversity metrics,
    with slight increases in relevance scores as well, when evaluated on both a
    Mandarin corpus and an English corpus.},
}

