@inproceedings{almarwani-diab:2017:W17-13,
  author    = {Almarwani, Nada and Diab, Mona},
  title     = {{Arabic} Textual Entailment with Word Embeddings},
  booktitle = {Proceedings of the Third {Arabic} Natural Language Processing Workshop},
  month     = apr,
  year      = {2017},
  address   = {Valencia, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {185--190},
  doi       = {10.18653/v1/W17-1322},
  url       = {http://www.aclweb.org/anthology/W17-1322},
  abstract  = {Determining the textual entailment between texts is important in many NLP
               tasks, such as summarization, question answering, and information extraction
               and retrieval. Various methods have been suggested based on external knowledge
               sources; however, such resources are not always available in all languages and
               their acquisition is typically laborious and very costly. Distributional word
               representations such as word embeddings learned over large corpora have been
               shown to capture syntactic and semantic word relationships. Such models have
               contributed to improving the performance of several NLP tasks. In this paper,
               we address the problem of textual entailment in Arabic. We employ both
               traditional features and distributional representations. Crucially, we do not
               depend on any external resources in the process. Our suggested approach
               yields state of the art performance on a standard data set, ArbTE, achieving an
               accuracy of 76.2\% compared to state of the art of 69.3\%.},
}

