@InProceedings{joseph-EtAl:2017:EMNLP2017,
  author    = {Joseph, Kenneth  and  Friedland, Lisa  and  Hobbs, William  and  Lazer, David  and  Tsur, Oren},
  title     = {ConStance: Modeling Annotation Contexts to Improve Stance Classification},
  booktitle = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing},
  month     = {September},
  year      = {2017},
  address   = {Copenhagen, Denmark},
  publisher = {Association for Computational Linguistics},
  pages     = {1115--1124},
  abstract  = {Manual annotations are a prerequisite for many applications of
               machine learning. However, weaknesses in the annotation process
               itself are easy to overlook. In particular, scholars often
               choose what information to give to annotators without examining
               these decisions empirically. For subjective tasks such as
               sentiment analysis, sarcasm, and stance detection, such choices
               can impact results. Here, for the task of political stance
               detection on Twitter, we show that providing too little context
               can result in noisy and uncertain annotations, whereas providing
               too strong a context may cause it to outweigh other signals. To
               characterize and reduce these biases, we develop ConStance, a
               general model for reasoning about annotations across information
               conditions. Given conflicting labels produced by multiple
               annotators seeing the same instances with different contexts,
               ConStance simultaneously estimates gold standard labels and
               learns a classifier for new instances. We show that the
               classifier learned by ConStance outperforms a variety of
               baselines at predicting political stance, while the model's
               interpretable parameters shed light on the effects of each
               context.},
  url       = {https://www.aclweb.org/anthology/D17-1116}
}

