@inproceedings{pilan-volodina-zesch:2016:COLING,
  author    = {Pil{\'a}n, Ildik{\'o} and Volodina, Elena and Zesch, Torsten},
  title     = {Predicting Proficiency Levels in Learner Writings by Transferring a Linguistic Complexity Model from Expert-Written Coursebooks},
  booktitle = {Proceedings of {COLING} 2016, the 26th International Conference on Computational Linguistics: Technical Papers},
  month     = dec,
  year      = {2016},
  address   = {Osaka, Japan},
  publisher = {The COLING 2016 Organizing Committee},
  pages     = {2101--2111},
  abstract  = {The lack of a sufficient amount of data tailored for a task is a
	well-recognized problem for many statistical NLP methods. In this paper, we
	explore whether data sparsity can be successfully tackled when classifying
	language proficiency levels in the domain of learner-written output texts. We
	aim at overcoming data sparsity by incorporating knowledge in the trained model
	from another domain consisting of input texts written by teaching professionals
	for learners. We compare different domain adaptation techniques and find that a
	weighted combination of the two types of data performs best, which can even
	rival systems based on considerably larger amounts of in-domain data. Moreover,
	we show that normalizing errors in learners' texts can substantially improve
	classification when level-annotated in-domain data is not available.},
  url       = {http://aclweb.org/anthology/C16-1198},
}

