@inproceedings{wang-EtAl:2017:EMNLP20175,
  author    = {Wang, Chenguang  and  Akbik, Alan  and  Chiticariu, Laura  and  Li, Yunyao  and  Xia, Fei  and  Xu, Anbang},
  title     = {{CROWD-IN-THE-LOOP}: A Hybrid Approach for Annotating Semantic Roles},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = sep,
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {1913--1922},
  abstract  = {Crowdsourcing has proven to be an effective method for generating labeled data
	for a range of NLP tasks. However, multiple recent attempts of using
	crowdsourcing to generate gold-labeled training data for semantic role labeling
	(SRL) reported only modest results, indicating that SRL is perhaps too
	difficult a task to be effectively crowdsourced. In this paper, we postulate
	that while producing SRL annotation does require expert involvement in general,
	a large subset of SRL labeling tasks is in fact appropriate for the crowd. We
	present a novel workflow in which we employ a classifier to identify difficult
	annotation tasks and route each task either to experts or crowd workers
	according to their difficulties. Our experimental evaluation shows that the
	proposed approach reduces the workload for experts by over two-thirds, and thus
	significantly reduces the cost of producing SRL annotation at little loss in
	quality.},
  url       = {https://www.aclweb.org/anthology/D17-1205}
}

