@InProceedings{shu-nakayama:2017:NMT,
  author    = {Shu, Raphael  and  Nakayama, Hideki},
  title     = {An Empirical Study of Adequate Vision Span for Attention-Based Neural Machine Translation},
  booktitle = {Proceedings of the First Workshop on Neural Machine Translation},
  month     = {August},
  year      = {2017},
  address   = {Vancouver},
  publisher = {Association for Computational Linguistics},
  pages     = {1--10},
  abstract  = {Recently, the attention mechanism has played a key role in achieving high
	performance for Neural Machine Translation models. However, as it computes a
	score function for the encoder states in all positions at each decoding step,
	the attention model greatly increases the computational complexity. In this
	paper, we investigate the adequate vision span of attention models in the
	context of machine translation by proposing a novel attention framework that
	is capable of dynamically reducing redundant score computation. The term
	"vision span" means a window of the encoder states considered by the
	attention model in one step. In our experiments, we found that the average
	window size of the vision span can be reduced by over 50% with modest loss in
	accuracy on English-Japanese and German-English translation tasks.},
  url       = {http://www.aclweb.org/anthology/W17-3201}
}