@inproceedings{riordan-EtAl:2017:BEA,
  author    = {Riordan, Brian and Horbach, Andrea and Cahill, Aoife and Zesch, Torsten and Lee, Chong Min},
  title     = {Investigating Neural Architectures for Short Answer Scoring},
  booktitle = {Proceedings of the 12th Workshop on Innovative Use of {NLP} for Building Educational Applications},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {159--168},
  doi       = {10.18653/v1/W17-5017},
  url       = {http://www.aclweb.org/anthology/W17-5017},
  abstract  = {Neural approaches to automated essay scoring have recently shown
    state-of-the-art performance. The automated essay scoring task typically
    involves a broad notion of writing quality that encompasses content, grammar,
    organization, and conventions. This differs from the short answer content
    scoring task, which focuses on content accuracy. The inputs to neural essay
    scoring models -- ngrams and embeddings -- are arguably well-suited to evaluate
    content in short answer scoring tasks. We investigate how several basic neural
    approaches similar to those used for automated essay scoring perform on short
    answer scoring. We show that neural architectures can outperform a strong
    non-neural baseline, but performance and optimal parameter settings vary across
    the more diverse types of prompts typical of short answer scoring.},
}

