@inproceedings{volkova-EtAl:2017:Short,
  author    = {Volkova, Svitlana and Shaffer, Kyle and Jang, Jin Yea and Hodas, Nathan},
  title     = {Separating Facts from Fiction: Linguistic Models to Classify Suspicious and Trusted News Posts on {Twitter}},
  booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
  month     = jul,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  pages     = {647--653},
  doi       = {10.18653/v1/P17-2102},
  abstract  = {Pew research polls report 62 percent of U.S. adults get news on social media
	(Gottfried and Shearer, 2016). In a December poll, 64 percent of U.S. adults
	said that ``made-up news'' has caused a ``great deal of confusion'' about
	the facts of current events (Barthel et al., 2016). Fabricated stories in
	social media, ranging from deliberate propaganda to hoaxes and satire,
	contributes to this confusion in addition to having serious effects on global
	stability.
	In this work we build predictive models to classify 130 thousand news posts as
	suspicious or verified, and predict four sub-types of suspicious news --
	satire, hoaxes, clickbait and propaganda. We show that neural network models
	trained on tweet content and social network interactions outperform lexical
	models. Unlike previous work on deception detection, we find that adding syntax
	and grammar features to our models does not improve performance. Incorporating
	linguistic features improves classification results, however, social
	interaction features are most informative for finer-grained separation between
	four types of suspicious news posts.},
  url       = {http://aclweb.org/anthology/P17-2102},
}

