% IJCNLP 2017 long paper; ACL Anthology ID I17-1076.
% month uses the standard BibTeX macro (nov) so styles can abbreviate/localize it.
@inproceedings{newell-EtAl:2017:I17-1,
  author    = {Newell, Edward and Schang, Ariane and Margolin, Drew and Ruths, Derek},
  title     = {Assessing the Verifiability of Attributions in News Text},
  booktitle = {Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
  month     = nov,
  year      = {2017},
  address   = {Taipei, Taiwan},
  publisher = {Asian Federation of Natural Language Processing},
  pages     = {754--763},
  abstract  = {When reporting the news, journalists rely on the statements of stakeholders,
    experts, and officials. The attribution of such a statement is verifiable if
    its fidelity to the source can be confirmed or denied. In this paper, we
    develop a new NLP task: determining the verifiability of an attribution based
    on linguistic cues. We operationalize the notion of verifiability as a score
    between 0 and 1 using human judgments in a comparison-based approach. Using
    crowdsourcing, we create a dataset of verifiability-scored attributions, and
    demonstrate a model that achieves an RMSE of 0.057 and Spearman's rank
    correlation of 0.95 to human-generated scores. We discuss the application of
    this technique to the analysis of mass media.},
  url       = {http://www.aclweb.org/anthology/I17-1076},
}

