@InProceedings{qingxun-EtAl:2018:S18-1,
  author    = {Liu, Qingxun  and  Yao, Hongdou  and  Zhou, Xiaobing  and  Xie, Ge},
  title     = {{YNU\_AI1799} at {SemEval}-2018 Task 11: Machine Comprehension Using Commonsense Knowledge of Different Model Ensemble},
  booktitle = {Proceedings of The 12th International Workshop on Semantic Evaluation},
  month     = jun,
  year      = {2018},
  address   = {New Orleans, Louisiana},
  publisher = {Association for Computational Linguistics},
  pages     = {1038--1042},
  abstract  = {In this paper, we describe a machine reading comprehension system that participated in SemEval-2018 Task 11: Machine Comprehension using commonsense knowledge. In this work, we train a series of neural network models such as multi-LSTM, BiLSTM, multi-BiLSTM-CNN and attention-based BiLSTM, etc. On top of some sub models, there are two kinds of word embedding: (a) general word embedding generated from unsupervised neural language model; and (b) position embedding generated from general word embedding. Finally, we make a hard vote on the predictions of these models and achieve relatively good result. The proposed approach achieves 8th place in Task 11 with the accuracy of 0.7213.},
  url       = {http://www.aclweb.org/anthology/S18-1173}
}

