@inproceedings{vasileva-etal-2019-takes,
    title = "It Takes Nine to Smell a Rat: Neural Multi-Task Learning for Check-Worthiness Prediction",
    author = "Vasileva, Slavena and
      Atanasova, Pepa and
      M{\`a}rquez, Llu{\'\i}s and
      Barr{\'o}n-Cede{\~n}o, Alberto and
      Nakov, Preslav",
    editor = "Mitkov, Ruslan and
      Angelova, Galia",
    booktitle = "Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2019)",
    month = sep,
    year = "2019",
    address = "Varna, Bulgaria",
    publisher = "INCOMA Ltd.",
    url = "https://aclanthology.org/R19-1141",
    doi = "10.26615/978-954-452-056-4_141",
    pages = "1229--1239",
    abstract = "We propose a multi-task deep-learning approach for estimating the check-worthiness of claims in political debates. Given a political debate, such as the 2016 US Presidential and Vice-Presidential ones, the task is to predict which statements in the debate should be prioritized for fact-checking. While different fact-checking organizations would naturally make different choices when analyzing the same debate, we show that it pays to learn from multiple sources simultaneously (PolitiFact, FactCheck, ABC, CNN, NPR, NYT, Chicago Tribune, The Guardian, and Washington Post) in a multi-task learning setup, even when a particular source is chosen as a target to imitate. Our evaluation shows state-of-the-art results on a standard dataset for the task of check-worthiness prediction.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="vasileva-etal-2019-takes">
    <titleInfo>
      <title>It Takes Nine to Smell a Rat: Neural Multi-Task Learning for Check-Worthiness Prediction</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Slavena</namePart>
      <namePart type="family">Vasileva</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Pepa</namePart>
      <namePart type="family">Atanasova</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Lluís</namePart>
      <namePart type="family">Màrquez</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Alberto</namePart>
      <namePart type="family">Barrón-Cedeño</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Preslav</namePart>
      <namePart type="family">Nakov</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2019-09</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2019)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Ruslan</namePart>
        <namePart type="family">Mitkov</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Galia</namePart>
        <namePart type="family">Angelova</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>INCOMA Ltd.</publisher>
        <place>
          <placeTerm type="text">Varna, Bulgaria</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>We propose a multi-task deep-learning approach for estimating the check-worthiness of claims in political debates. Given a political debate, such as the 2016 US Presidential and Vice-Presidential ones, the task is to predict which statements in the debate should be prioritized for fact-checking. While different fact-checking organizations would naturally make different choices when analyzing the same debate, we show that it pays to learn from multiple sources simultaneously (PolitiFact, FactCheck, ABC, CNN, NPR, NYT, Chicago Tribune, The Guardian, and Washington Post) in a multi-task learning setup, even when a particular source is chosen as a target to imitate. Our evaluation shows state-of-the-art results on a standard dataset for the task of check-worthiness prediction.</abstract>
    <identifier type="citekey">vasileva-etal-2019-takes</identifier>
    <identifier type="doi">10.26615/978-954-452-056-4_141</identifier>
    <location>
      <url>https://aclanthology.org/R19-1141</url>
    </location>
    <part>
      <date>2019-09</date>
      <extent unit="page">
        <start>1229</start>
        <end>1239</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T It Takes Nine to Smell a Rat: Neural Multi-Task Learning for Check-Worthiness Prediction
%A Vasileva, Slavena
%A Atanasova, Pepa
%A Màrquez, Lluís
%A Barrón-Cedeño, Alberto
%A Nakov, Preslav
%Y Mitkov, Ruslan
%Y Angelova, Galia
%S Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2019)
%D 2019
%8 September
%I INCOMA Ltd.
%C Varna, Bulgaria
%F vasileva-etal-2019-takes
%X We propose a multi-task deep-learning approach for estimating the check-worthiness of claims in political debates. Given a political debate, such as the 2016 US Presidential and Vice-Presidential ones, the task is to predict which statements in the debate should be prioritized for fact-checking. While different fact-checking organizations would naturally make different choices when analyzing the same debate, we show that it pays to learn from multiple sources simultaneously (PolitiFact, FactCheck, ABC, CNN, NPR, NYT, Chicago Tribune, The Guardian, and Washington Post) in a multi-task learning setup, even when a particular source is chosen as a target to imitate. Our evaluation shows state-of-the-art results on a standard dataset for the task of check-worthiness prediction.
%R 10.26615/978-954-452-056-4_141
%U https://aclanthology.org/R19-1141
%U https://doi.org/10.26615/978-954-452-056-4_141
%P 1229-1239
Markdown (Informal)
[It Takes Nine to Smell a Rat: Neural Multi-Task Learning for Check-Worthiness Prediction](https://aclanthology.org/R19-1141) (Vasileva et al., RANLP 2019)
ACL
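
The abstract above describes a multi-task learning setup in which check-worthiness labels from nine fact-checking sources are learned jointly, with one source optionally chosen as the target to imitate. The following is a minimal illustrative sketch only, not the authors' implementation: it assumes a shared sentence encoder with one binary classification head per source, and all names (MultiTaskCheckWorthiness, NUM_SOURCES, the vocabulary and dimension sizes) are hypothetical.

```python
# Hypothetical sketch of a multi-task check-worthiness model: a shared text
# encoder feeds nine source-specific binary heads (one per fact-checking
# source). This illustrates the general setup described in the abstract,
# not the paper's actual architecture or training procedure.
import torch
import torch.nn as nn

NUM_SOURCES = 9          # one task per fact-checking source
VOCAB_SIZE = 10_000      # assumed vocabulary size
EMBED_DIM = 128
HIDDEN_DIM = 256

class MultiTaskCheckWorthiness(nn.Module):
    def __init__(self):
        super().__init__()
        # Shared layers: word embedding + bidirectional LSTM sentence encoder.
        self.embed = nn.Embedding(VOCAB_SIZE, EMBED_DIM, padding_idx=0)
        self.encoder = nn.LSTM(EMBED_DIM, HIDDEN_DIM, batch_first=True,
                               bidirectional=True)
        # Task-specific heads: each predicts a check-worthiness logit
        # for one source's annotation behavior.
        self.heads = nn.ModuleList(
            [nn.Linear(2 * HIDDEN_DIM, 1) for _ in range(NUM_SOURCES)]
        )

    def forward(self, token_ids):
        # token_ids: (batch, seq_len) integer tensor of word indices.
        embedded = self.embed(token_ids)
        _, (hidden, _) = self.encoder(embedded)
        # Concatenate the final forward and backward hidden states.
        sentence_repr = torch.cat([hidden[0], hidden[1]], dim=-1)
        # One logit per source: shape (batch, NUM_SOURCES).
        return torch.cat([head(sentence_repr) for head in self.heads], dim=-1)

model = MultiTaskCheckWorthiness()
dummy_batch = torch.randint(1, VOCAB_SIZE, (4, 20))  # 4 sentences, 20 tokens
logits = model(dummy_batch)
print(logits.shape)  # torch.Size([4, 9])
```

In such a setup, training a single encoder against all nine sources' labels while reading off the head of the chosen target source is one simple way to realize "learning from multiple sources simultaneously, even when a particular source is chosen as a target to imitate."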