@inproceedings{benabacha-demnerfushman:2017:SemEval,
  author    = {Ben Abacha, Asma and Demner-Fushman, Dina},
  title     = {{NLM\_NIH} at {SemEval}-2017 Task 3: From Question Entailment to Question Similarity for Community Question Answering},
  booktitle = {Proceedings of the 11th International Workshop on Semantic Evaluation ({SemEval}-2017)},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {349--352},
  abstract  = {This paper describes our participation in SemEval-2017 Task 3 on Community 
	Question  Answering (cQA). The Question Similarity subtask (B) aims to rank  a
	set of related questions retrieved by a search engine according to their
	similarity to the original question. We adapted our feature-based system for
	Recognizing Question Entailment (RQE) to the question similarity task. Tested
	on cQA-B-2016 test data, our RQE system outperformed the best system of the
	2016 challenge in all measures with 77.47 MAP and 80.57 Accuracy. On cQA-B-2017
	test data, performances of all systems dropped by around 30 points. Our primary
	system obtained 44.62 MAP, 67.27 Accuracy and 47.25 F1 score. The cQA-B-2017
	best system achieved 47.22 MAP and 42.37 F1 score. Our system is ranked sixth
	in terms of MAP and third in terms of F1 out of 13 participating teams.},
  doi       = {10.18653/v1/S17-2057},
  url       = {https://aclanthology.org/S17-2057}
}

