@inproceedings{ding-EtAl:2017:Long,
  author    = {Ding, Yanzhuo and Liu, Yang and Luan, Huanbo and Sun, Maosong},
  title     = {Visualizing and Understanding Neural Machine Translation},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {1150--1159},
  abstract  = {While neural machine translation (NMT) has made remarkable progress in recent years, it is hard to interpret its internal workings due to the continuous representations and non-linearity of neural networks. In this work, we propose to use layer-wise relevance propagation (LRP) to compute the contribution of each contextual word to arbitrary hidden states in the attention-based encoder-decoder framework. We show that visualization with LRP helps to interpret the internal workings of NMT and analyze translation errors.},
  doi       = {10.18653/v1/P17-1106},
  url       = {https://aclanthology.org/P17-1106},
}

