@InProceedings{hasan-EtAl:2016:ClinicalNLP,
  author    = {Hasan, Sadid A. and Liu, Bo and Liu, Joey and Qadir, Ashequl and Lee, Kathy and Datla, Vivek and Prakash, Aaditya and Farri, Oladimeji},
  title     = {Neural Clinical Paraphrase Generation with Attention},
  booktitle = {Proceedings of the Clinical Natural Language Processing Workshop (ClinicalNLP)},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {42--53},
  abstract  = {Paraphrase generation is important in various applications such as search,
	summarization, and question answering due to its ability to generate textual
	alternatives while keeping the overall meaning intact. Clinical paraphrase
	generation is especially vital in building patient-centric clinical decision
	support (CDS) applications where users are able to understand complex clinical
	jargons via easily comprehensible alternative paraphrases. This paper presents
	Neural Clinical Paraphrase Generation (NCPG), a novel approach that casts the
	task as a monolingual neural machine translation (NMT) problem. We propose an
	end-to-end neural network built on an attention-based bidirectional Recurrent
	Neural Network (RNN) architecture with an encoder-decoder framework to perform
	the task. Conventional bilingual NMT models mostly rely on word-level modeling
	and are often limited by out-of-vocabulary (OOV) issues. In contrast, we
	represent the source and target paraphrase pairs as character sequences to
	address this limitation. To the best of our knowledge, this is the first work
	that uses attention-based RNNs for clinical paraphrase generation and also
	proposes an end-to-end character-level modeling for this task. Extensive
	experiments on a large curated clinical paraphrase corpus show that the
	attention-based NCPG models achieve improvements of up to 5.2 BLEU points and
	0.5 METEOR points over a non-attention based strong baseline for word-level
	modeling, whereas further gains of up to 6.1 BLEU points and 1.3 METEOR points
	are obtained by the character-level NCPG models over their word-level
	counterparts. Overall, our models demonstrate comparable performance relative
	to the state-of-the-art phrase-based non-neural models.},
  url       = {http://aclweb.org/anthology/W16-4207},
}

