@InProceedings{shu-miura:2016:WAT2016,
  author    = {Shu, Raphael  and  Miura, Akiva},
  title     = {Residual Stacking of {RNNs} for {Neural Machine Translation}},
  booktitle = {Proceedings of the 3rd Workshop on Asian Translation ({WAT2016})},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {223--229},
  abstract  = {To enhance {Neural Machine Translation} models, several obvious ways such as
	enlarging the hidden size of recurrent layers and stacking multiple layers of
	{RNN} can be considered. Surprisingly, we observe that using naively stacked {RNNs}
	in the decoder slows down the training and leads to degradation in performance.
	In this paper, we demonstrate that applying residual connections in the depth
	of stacked {RNNs} can help the optimization, which is referred to as residual
	stacking. In empirical evaluation, residual stacking of decoder {RNNs} gives
	superior results compared to other methods of enhancing the model with a fixed
	parameter budget. Our submitted systems in {WAT2016} are based on an {NMT} model
	ensemble with residual stacking in the decoder. To further improve the
	performance, we also attempt various methods of system combination in our
	experiments.},
  url       = {http://aclweb.org/anthology/W16-4623}
}

