@inproceedings{madnani-EtAl:2017:EthNLP,
  author    = {Madnani, Nitin and Loukina, Anastassia and von Davier, Alina and Burstein, Jill and Cahill, Aoife},
  title     = {Building Better Open-Source Tools to Support Fairness in Automated Scoring},
  booktitle = {Proceedings of the First {ACL} Workshop on Ethics in Natural Language Processing},
  month     = apr,
  year      = {2017},
  address   = {Valencia, Spain},
  publisher = {Association for Computational Linguistics},
  pages     = {41--52},
  abstract  = {Automated scoring of written and spoken responses is an NLP application that
	can significantly impact lives especially when deployed as part of high-stakes
	tests such as the GRE\textregistered{} and the TOEFL\textregistered{}. Ethical considerations require that
	automated scoring algorithms treat all test-takers fairly. The educational
	measurement community has done significant research on fairness in assessments
	and automated scoring systems must incorporate their recommendations. The best
	way to do that is by making available automated, non-proprietary tools to NLP
	researchers that directly incorporate these recommendations and generate the
	analyses needed to help identify and resolve biases in their scoring systems.
	In this paper, we attempt to provide such a solution.},
  url       = {http://www.aclweb.org/anthology/W17-1605},
}

