@inproceedings{balazs-EtAl:2017:RepEval,
  author    = {Balazs, Jorge and Marrese-Taylor, Edison and Loyola, Pablo and Matsuo, Yutaka},
  title     = {Refining Raw Sentence Representations for Textual Entailment Recognition via Attention},
  booktitle = {Proceedings of the 2nd Workshop on Evaluating Vector Space Representations for {NLP}},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {51--55},
  abstract  = {In this paper we present the model used by the team Rivercorners for the 2017
               RepEval shared task. First, our model separately encodes a pair of sentences
               into variable-length representations by using a bidirectional LSTM. Later, it
               creates fixed-length raw representations by means of simple aggregation
               functions, which are then refined using an attention mechanism. Finally it
               combines the refined representations of both sentences into a single vector to
               be used for classification. With this model we obtained test accuracies of
               72.057\% and 72.055\% in the matched and mismatched evaluation tracks
               respectively, outperforming the LSTM baseline, and obtaining performances
               similar to a model that relies on shared information between sentences (ESIM).
               When using an ensemble both accuracies increased to 72.247\% and 72.827\%
               respectively.},
  doi       = {10.18653/v1/W17-5310},
  url       = {http://www.aclweb.org/anthology/W17-5310}
}

