@inproceedings{arras-EtAl:2017:WASSA2017,
  author    = {Arras, Leila and Montavon, Gr{\'e}goire and M{\"u}ller, Klaus-Robert and Samek, Wojciech},
  title     = {Explaining Recurrent Neural Network Predictions in Sentiment Analysis},
  booktitle = {Proceedings of the 8th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {159--168},
  doi       = {10.18653/v1/W17-5221},
  url       = {http://www.aclweb.org/anthology/W17-5221},
  abstract  = {Recently, a technique called Layer-wise Relevance Propagation (LRP) was shown to deliver insightful explanations in the form of input space relevances for understanding feed-forward neural network classification decisions. In the present work, we extend the usage of LRP to recurrent neural networks. We propose a specific propagation rule applicable to multiplicative connections as they arise in recurrent network architectures such as LSTMs and GRUs. We apply our technique to a word-based bi-directional LSTM model on a five-class sentiment prediction task, and evaluate the resulting LRP relevances both qualitatively and quantitatively, obtaining better results than a gradient-based related method which was used in previous work.},
}

