@inproceedings{jobanputra-2019-unsupervised,
title = "Unsupervised Question Answering for Fact-Checking",
author = "Jobanputra, Mayank",
editor = "Thorne, James and
Vlachos, Andreas and
Cocarascu, Oana and
Christodoulopoulos, Christos and
Mittal, Arpit",
booktitle = "Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D19-6609",
doi = "10.18653/v1/D19-6609",
pages = "52--56",
abstract = "Recent Deep Learning (DL) models have succeeded in achieving human-level accuracy on various natural language tasks such as question-answering, natural language inference (NLI), and textual entailment. These tasks not only require the contextual knowledge but also the reasoning abilities to be solved efficiently. In this paper, we propose an unsupervised question-answering based approach for a similar task, fact-checking. We transform the FEVER dataset into a Cloze-task by masking named entities provided in the claims. To predict the answer token, we utilize pre-trained Bidirectional Encoder Representations from Transformers (BERT). The classifier computes label based on the correctly answered questions and a threshold. Currently, the classifier is able to classify the claims as {``}SUPPORTS{''} and {``}MANUAL{\_}REVIEW{''}. This approach achieves a label accuracy of 80.2{\%} on the development set and 80.25{\%} on the test set of the transformed dataset.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="jobanputra-2019-unsupervised">
<titleInfo>
<title>Unsupervised Question Answering for Fact-Checking</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mayank</namePart>
<namePart type="family">Jobanputra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2019-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER)</title>
</titleInfo>
<name type="personal">
<namePart type="given">James</namePart>
<namePart type="family">Thorne</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Vlachos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Oana</namePart>
<namePart type="family">Cocarascu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christos</namePart>
<namePart type="family">Christodoulopoulos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Arpit</namePart>
<namePart type="family">Mittal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hong Kong, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent Deep Learning (DL) models have succeeded in achieving human-level accuracy on various natural language tasks such as question-answering, natural language inference (NLI), and textual entailment. These tasks require not only contextual knowledge but also reasoning abilities to be solved efficiently. In this paper, we propose an unsupervised question-answering based approach for a similar task, fact-checking. We transform the FEVER dataset into a Cloze-task by masking named entities provided in the claims. To predict the answer token, we utilize pre-trained Bidirectional Encoder Representations from Transformers (BERT). The classifier computes the label based on the correctly answered questions and a threshold. Currently, the classifier is able to classify the claims as “SUPPORTS” and “MANUAL_REVIEW”. This approach achieves a label accuracy of 80.2% on the development set and 80.25% on the test set of the transformed dataset.</abstract>
<identifier type="citekey">jobanputra-2019-unsupervised</identifier>
<identifier type="doi">10.18653/v1/D19-6609</identifier>
<location>
<url>https://aclanthology.org/D19-6609</url>
</location>
<part>
<date>2019-11</date>
<extent unit="page">
<start>52</start>
<end>56</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Unsupervised Question Answering for Fact-Checking
%A Jobanputra, Mayank
%Y Thorne, James
%Y Vlachos, Andreas
%Y Cocarascu, Oana
%Y Christodoulopoulos, Christos
%Y Mittal, Arpit
%S Proceedings of the Second Workshop on Fact Extraction and VERification (FEVER)
%D 2019
%8 November
%I Association for Computational Linguistics
%C Hong Kong, China
%F jobanputra-2019-unsupervised
%X Recent Deep Learning (DL) models have succeeded in achieving human-level accuracy on various natural language tasks such as question-answering, natural language inference (NLI), and textual entailment. These tasks require not only contextual knowledge but also reasoning abilities to be solved efficiently. In this paper, we propose an unsupervised question-answering based approach for a similar task, fact-checking. We transform the FEVER dataset into a Cloze-task by masking named entities provided in the claims. To predict the answer token, we utilize pre-trained Bidirectional Encoder Representations from Transformers (BERT). The classifier computes the label based on the correctly answered questions and a threshold. Currently, the classifier is able to classify the claims as “SUPPORTS” and “MANUAL_REVIEW”. This approach achieves a label accuracy of 80.2% on the development set and 80.25% on the test set of the transformed dataset.
%R 10.18653/v1/D19-6609
%U https://aclanthology.org/D19-6609
%U https://doi.org/10.18653/v1/D19-6609
%P 52-56
Markdown (Informal)
[Unsupervised Question Answering for Fact-Checking](https://aclanthology.org/D19-6609) (Jobanputra, 2019)
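
As a rough, hypothetical sketch of the approach summarized in the abstract (mask a named entity in a claim, let a pre-trained BERT masked language model fill the blank, then threshold the fraction of correctly recovered entities to choose between "SUPPORTS" and "MANUAL_REVIEW"), something along these lines could be put together with the Hugging Face `transformers` library. This is not the authors' released code; the model name, the `threshold` value, and the `verify_claim` helper are illustrative assumptions.

```python
# Hypothetical sketch (not the authors' code) of the Cloze-style verification
# described in the abstract: mask a named entity in a claim, let pre-trained
# BERT fill the blank, and label the claim by how many masked questions are
# answered correctly.
from transformers import pipeline

# Model choice is an assumption; the paper only specifies pre-trained BERT.
fill_mask = pipeline("fill-mask", model="bert-base-uncased")

def verify_claim(claim: str, entities: list[str], threshold: float = 0.5) -> str:
    """Mask each named entity in turn and check whether BERT recovers it.

    `threshold` (fraction of correctly answered Cloze questions required for
    "SUPPORTS") is an illustrative value, not one taken from the paper.
    Only single-token entities can be recovered by a single [MASK] slot.
    """
    correct = 0
    for entity in entities:
        # Replace the entity with BERT's mask token to form a Cloze question.
        cloze = claim.replace(entity, fill_mask.tokenizer.mask_token, 1)
        top_prediction = fill_mask(cloze, top_k=1)[0]["token_str"]
        if top_prediction.strip().lower() == entity.lower():
            correct += 1
    if entities and correct / len(entities) >= threshold:
        return "SUPPORTS"
    return "MANUAL_REVIEW"

print(verify_claim("Paris is the capital of France.", ["France"]))
```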