BibTeX
@inproceedings{newell-etal-2017-assessing,
title = "Assessing the Verifiability of Attributions in News Text",
author = "Newell, Edward and
Schang, Ariane and
Margolin, Drew and
Ruths, Derek",
editor = "Kondrak, Greg and
Watanabe, Taro",
booktitle = "Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
month = nov,
year = "2017",
address = "Taipei, Taiwan",
publisher = "Asian Federation of Natural Language Processing",
url = "https://aclanthology.org/I17-1076",
pages = "754--763",
abstract = "When reporting the news, journalists rely on the statements of stakeholders, experts, and officials. The attribution of such a statement is verifiable if its fidelity to the source can be confirmed or denied. In this paper, we develop a new NLP task: determining the verifiability of an attribution based on linguistic cues. We operationalize the notion of verifiability as a score between 0 and 1 using human judgments in a comparison-based approach. Using crowdsourcing, we create a dataset of verifiability-scored attributions, and demonstrate a model that achieves an RMSE of 0.057 and Spearman{'}s rank correlation of 0.95 to human-generated scores. We discuss the application of this technique to the analysis of mass media.",
}
MODS XML
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="newell-etal-2017-assessing">
    <titleInfo>
      <title>Assessing the Verifiability of Attributions in News Text</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Edward</namePart>
      <namePart type="family">Newell</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ariane</namePart>
      <namePart type="family">Schang</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Drew</namePart>
      <namePart type="family">Margolin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Derek</namePart>
      <namePart type="family">Ruths</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2017-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Greg</namePart>
        <namePart type="family">Kondrak</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Taro</namePart>
        <namePart type="family">Watanabe</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Asian Federation of Natural Language Processing</publisher>
        <place>
          <placeTerm type="text">Taipei, Taiwan</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>When reporting the news, journalists rely on the statements of stakeholders, experts, and officials. The attribution of such a statement is verifiable if its fidelity to the source can be confirmed or denied. In this paper, we develop a new NLP task: determining the verifiability of an attribution based on linguistic cues. We operationalize the notion of verifiability as a score between 0 and 1 using human judgments in a comparison-based approach. Using crowdsourcing, we create a dataset of verifiability-scored attributions, and demonstrate a model that achieves an RMSE of 0.057 and Spearman’s rank correlation of 0.95 to human-generated scores. We discuss the application of this technique to the analysis of mass media.</abstract>
    <identifier type="citekey">newell-etal-2017-assessing</identifier>
    <location>
      <url>https://aclanthology.org/I17-1076</url>
    </location>
    <part>
      <date>2017-11</date>
      <extent unit="page">
        <start>754</start>
        <end>763</end>
      </extent>
    </part>
  </mods>
</modsCollection>
Endnote
%0 Conference Proceedings
%T Assessing the Verifiability of Attributions in News Text
%A Newell, Edward
%A Schang, Ariane
%A Margolin, Drew
%A Ruths, Derek
%Y Kondrak, Greg
%Y Watanabe, Taro
%S Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)
%D 2017
%8 November
%I Asian Federation of Natural Language Processing
%C Taipei, Taiwan
%F newell-etal-2017-assessing
%X When reporting the news, journalists rely on the statements of stakeholders, experts, and officials. The attribution of such a statement is verifiable if its fidelity to the source can be confirmed or denied. In this paper, we develop a new NLP task: determining the verifiability of an attribution based on linguistic cues. We operationalize the notion of verifiability as a score between 0 and 1 using human judgments in a comparison-based approach. Using crowdsourcing, we create a dataset of verifiability-scored attributions, and demonstrate a model that achieves an RMSE of 0.057 and Spearman’s rank correlation of 0.95 to human-generated scores. We discuss the application of this technique to the analysis of mass media.
%U https://aclanthology.org/I17-1076
%P 754-763
Markdown (Informal)
[Assessing the Verifiability of Attributions in News Text](https://aclanthology.org/I17-1076) (Newell et al., IJCNLP 2017)
ACL
Edward Newell, Ariane Schang, Drew Margolin, and Derek Ruths. 2017. Assessing the Verifiability of Attributions in News Text. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 754–763, Taipei, Taiwan. Asian Federation of Natural Language Processing.
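
The abstract reports model quality against human-generated verifiability scores using two metrics: RMSE and Spearman's rank correlation. As a minimal sketch of how those two metrics are typically computed (not the authors' implementation), assuming `human_scores` and `model_scores` are hypothetical arrays of per-attribution scores in [0, 1]:

```python
import numpy as np
from scipy.stats import spearmanr

# Illustrative stand-in data, not the paper's dataset:
# per-attribution verifiability scores in [0, 1].
human_scores = np.array([0.91, 0.12, 0.55, 0.78, 0.33])
model_scores = np.array([0.88, 0.15, 0.60, 0.74, 0.30])

# Root-mean-square error: average absolute agreement with human judgments.
rmse = np.sqrt(np.mean((model_scores - human_scores) ** 2))

# Spearman's rank correlation: agreement in how the two sets rank the items.
rho, p_value = spearmanr(model_scores, human_scores)

print(f"RMSE: {rmse:.3f}")
print(f"Spearman's rho: {rho:.2f} (p = {p_value:.3g})")
```

The two metrics are complementary, which is presumably why the paper reports both: RMSE captures how close the predicted scores are in absolute terms, while Spearman's rho captures whether the model orders attributions by verifiability the same way human judges do.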