@inproceedings{wang-EtAl:2017:I17-4,
  author    = {Wang, Min  and  Liu, Qingxun  and  Ding, Peng  and  Li, Yongbin  and  Zhou, Xiaobing},
  title     = {{YNUDLG} at {IJCNLP-2017} Task 5: A {CNN-LSTM} Model with Attention for Multi-choice Question Answering in Examinations},
  booktitle = {Proceedings of the IJCNLP 2017, Shared Tasks},
  month     = dec,
  year      = {2017},
  address   = {Taipei, Taiwan},
  publisher = {Asian Federation of Natural Language Processing},
  pages     = {194--198},
  abstract  = {In this paper, we perform convolutional neural networks (CNN) to learn the
    joint representations of question-answer pairs first, then use the joint
    representations as the inputs of the long short-term memory (LSTM) with
    attention to learn the answer sequence of a question for labeling the matching
    quality of each answer. We also incorporating external knowledge by training
    Word2Vec on Flashcards data, thus we get more compact embedding. Experimental
    results show that our method achieves better or comparable performance compared
    with the baseline system. The proposed approach achieves the accuracy of 0.39,
    0.42 in English valid set, test set, respectively.},
  url       = {http://www.aclweb.org/anthology/I17-4032},
}

