@InProceedings{sha-EtAl:2016:COLING,
  author    = {Sha, Lei  and  Chang, Baobao  and  Sui, Zhifang  and  Li, Sujian},
  title     = {Reading and Thinking: Re-read LSTM Unit for Textual Entailment Recognition},
  booktitle = {Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
  month     = {December},
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {2870--2879},
  abstract  = {Recognizing Textual Entailment (RTE) is a fundamentally important task in
	natural language processing that has many applications. The recently released
	Stanford Natural Language Inference (SNLI) corpus has made it possible to
	develop and evaluate deep neural network methods for the RTE task. Previous
	neural network based methods usually try to encode the two sentences (premise
	and hypothesis) and send them together into a multi-layer perceptron to get
	their entailment type, or use an LSTM-RNN to link the two sentences together while
	using an attention mechanism to enhance the model’s ability. In this paper, we
	propose to use a re-read mechanism, which means reading the premise again and
	again while reading the hypothesis. After reading the premise again, the model can
	get a better understanding of the premise, which can also affect the
	understanding of the hypothesis. Conversely, a better understanding of the
	hypothesis can also affect the understanding of the premise. With this
	alternating re-read process, the model can “think” its way to a better
	decision on the entailment type. We design a new LSTM unit, the re-read LSTM (rLSTM), to
	implement this “thinking” process. Experiments show that we achieve results
	better than the current state of the art.},
  url       = {http://aclweb.org/anthology/C16-1270}
}
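Below the entry, a minimal sketch of the re-read idea the abstract describes: while encoding each hypothesis token, the model attends back over ("re-reads") the premise states and feeds the attended summary into the LSTM update. This is not the authors' rLSTM unit, which builds the re-read weights into the unit itself; it approximates the idea with a standard LSTMCell plus a simple learned attention, and every name and dimension here is an illustrative assumption.

import torch
import torch.nn.functional as F

class ReReadSketch(torch.nn.Module):
    """Toy re-read encoder: attend over the premise at every hypothesis step."""
    def __init__(self, emb_dim=100, hid_dim=128):
        super().__init__()
        self.premise_enc = torch.nn.LSTM(emb_dim, hid_dim, batch_first=True)
        # The hypothesis cell reads [token embedding ; premise summary].
        self.cell = torch.nn.LSTMCell(emb_dim + hid_dim, hid_dim)
        self.attn = torch.nn.Linear(hid_dim * 2, 1)

    def forward(self, premise_emb, hypothesis_emb):
        # premise_emb: (B, Tp, E); hypothesis_emb: (B, Th, E)
        p_states, _ = self.premise_enc(premise_emb)            # (B, Tp, H)
        batch, t_p, hid = p_states.shape
        h = p_states.new_zeros(batch, hid)
        c = p_states.new_zeros(batch, hid)
        for t in range(hypothesis_emb.size(1)):
            # "Re-read": score every premise state against the current state h.
            query = h.unsqueeze(1).expand(-1, t_p, -1)         # (B, Tp, H)
            scores = self.attn(torch.cat([p_states, query], dim=-1)).squeeze(-1)
            alpha = F.softmax(scores, dim=-1)                  # (B, Tp)
            summary = torch.bmm(alpha.unsqueeze(1), p_states).squeeze(1)
            x = torch.cat([hypothesis_emb[:, t], summary], dim=-1)
            h, c = self.cell(x, (h, c))                        # update with re-read context
        return h  # final state; an entailment classifier would sit on top

# Illustrative usage with random stand-ins for word embeddings:
model = ReReadSketch()
premise = torch.randn(2, 7, 100)          # batch of 2, 7 premise tokens
hypothesis = torch.randn(2, 5, 100)       # 5 hypothesis tokens
final_state = model(premise, hypothesis)  # shape (2, 128)

Feeding the premise summary into the cell input is the simplest place to inject the re-read signal; the paper's rLSTM instead wires the attention into the unit, but the data flow, reweighting the premise at every hypothesis step, is the same.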

