@inproceedings{schnober-EtAl:2016:COLING,
  author    = {Schnober, Carsten and Eger, Steffen and Do Dinh, Erik-L{\^a}n and Gurevych, Iryna},
  title     = {Still not there? Comparing Traditional Sequence-to-Sequence Models to Encoder-Decoder Neural Networks on Monotone String Translation Tasks},
  booktitle = {Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {1703--1714},
  abstract  = {We analyze the performance of encoder-decoder neural models and compare them
    with well-known established methods. The latter represent different classes of
    traditional approaches that are applied to the monotone sequence-to-sequence
    tasks OCR post-correction, spelling correction, grapheme-to-phoneme conversion,
    and lemmatization.
    Such tasks are of practical relevance for various higher-level research fields
    including digital humanities, automatic text correction, and speech
    recognition.
    We investigate how well generic deep-learning approaches adapt to these tasks,
    and how they perform in comparison with established and more specialized
    methods, including our own adaptation of pruned CRFs.},
  url       = {https://aclanthology.org/C16-1160/},
}

