@InProceedings{schmaltz-EtAl:2017:EMNLP2017,
  author    = {Schmaltz, Allen  and  Kim, Yoon  and  Rush, Alexander  and  Shieber, Stuart},
  title     = {Adapting Sequence Models for Sentence Correction},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {2807--2813},
  abstract  = {In a controlled experiment of sequence-to-sequence approaches for the task of
    sentence correction, we find that character-based models are generally more
    effective than word-based models and models that encode subword information via
    convolutions, and that modeling the output data as a series of diffs improves
    effectiveness over standard approaches. Our strongest sequence-to-sequence
    model improves over our strongest phrase-based statistical machine translation
    model, with access to the same data, by 6 M2 (0.5 GLEU) points. Additionally,
    in the data environment of the standard CoNLL-2014 setup, we demonstrate that
    modeling (and tuning against) diffs yields similar or better M2 scores with
    simpler models and/or significantly less data than previous
    sequence-to-sequence approaches.},
  doi       = {10.18653/v1/D17-1298},
  url       = {https://www.aclweb.org/anthology/D17-1298},
}

