@inproceedings{yuan-vlachos-2024-zero,
title = "Zero-Shot Fact-Checking with Semantic Triples and Knowledge Graphs",
author = "Yuan, Moy and
Vlachos, Andreas",
editor = "Biswas, Russa and
Kaffee, Lucie-Aim{\'e}e and
Agarwal, Oshin and
Minervini, Pasquale and
Singh, Sameer and
de Melo, Gerard",
booktitle = "Proceedings of the 1st Workshop on Knowledge Graphs and Large Language Models (KaLLM 2024)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.kallm-1.11",
doi = "10.18653/v1/2024.kallm-1.11",
pages = "105--115",
abstract = "Despite progress in automated fact-checking, most systems require a significant amount of labeled training data, which is expensive. In this paper, we propose a novel zero-shot method, which instead of operating directly on the claim and evidence sentences, decomposes them into semantic triples augmented using external knowledge graphs, and uses large language models trained for natural language inference. This allows it to generalize to adversarial datasets and domains that supervised models require specific training data for. Our empirical results show that our approach outperforms previous zero-shot approaches on FEVER, FEVER-Symmetric, FEVER 2.0, and Climate-FEVER, while being comparable or better than supervised models on the adversarial and the out-of-domain datasets.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="yuan-vlachos-2024-zero">
<titleInfo>
<title>Zero-Shot Fact-Checking with Semantic Triples and Knowledge Graphs</title>
</titleInfo>
<name type="personal">
<namePart type="given">Moy</namePart>
<namePart type="family">Yuan</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Vlachos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on Knowledge Graphs and Large Language Models (KaLLM 2024)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Russa</namePart>
<namePart type="family">Biswas</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucie-Aimée</namePart>
<namePart type="family">Kaffee</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Oshin</namePart>
<namePart type="family">Agarwal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Pasquale</namePart>
<namePart type="family">Minervini</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sameer</namePart>
<namePart type="family">Singh</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gerard</namePart>
<namePart type="family">de Melo</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Bangkok, Thailand</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Despite progress in automated fact-checking, most systems require a significant amount of labeled training data, which is expensive. In this paper, we propose a novel zero-shot method, which instead of operating directly on the claim and evidence sentences, decomposes them into semantic triples augmented using external knowledge graphs, and uses large language models trained for natural language inference. This allows it to generalize to adversarial datasets and domains that supervised models require specific training data for. Our empirical results show that our approach outperforms previous zero-shot approaches on FEVER, FEVER-Symmetric, FEVER 2.0, and Climate-FEVER, while being comparable or better than supervised models on the adversarial and the out-of-domain datasets.</abstract>
<identifier type="citekey">yuan-vlachos-2024-zero</identifier>
<identifier type="doi">10.18653/v1/2024.kallm-1.11</identifier>
<location>
<url>https://aclanthology.org/2024.kallm-1.11</url>
</location>
<part>
<date>2024-08</date>
<extent unit="page">
<start>105</start>
<end>115</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Zero-Shot Fact-Checking with Semantic Triples and Knowledge Graphs
%A Yuan, Moy
%A Vlachos, Andreas
%Y Biswas, Russa
%Y Kaffee, Lucie-Aimée
%Y Agarwal, Oshin
%Y Minervini, Pasquale
%Y Singh, Sameer
%Y de Melo, Gerard
%S Proceedings of the 1st Workshop on Knowledge Graphs and Large Language Models (KaLLM 2024)
%D 2024
%8 August
%I Association for Computational Linguistics
%C Bangkok, Thailand
%F yuan-vlachos-2024-zero
%X Despite progress in automated fact-checking, most systems require a significant amount of labeled training data, which is expensive. In this paper, we propose a novel zero-shot method, which instead of operating directly on the claim and evidence sentences, decomposes them into semantic triples augmented using external knowledge graphs, and uses large language models trained for natural language inference. This allows it to generalize to adversarial datasets and domains that supervised models require specific training data for. Our empirical results show that our approach outperforms previous zero-shot approaches on FEVER, FEVER-Symmetric, FEVER 2.0, and Climate-FEVER, while being comparable or better than supervised models on the adversarial and the out-of-domain datasets.
%R 10.18653/v1/2024.kallm-1.11
%U https://aclanthology.org/2024.kallm-1.11
%U https://doi.org/10.18653/v1/2024.kallm-1.11
%P 105-115
Markdown (Informal)
[Zero-Shot Fact-Checking with Semantic Triples and Knowledge Graphs](https://aclanthology.org/2024.kallm-1.11) (Yuan & Vlachos, KaLLM-WS 2024)
ACL
Moy Yuan and Andreas Vlachos. 2024. Zero-Shot Fact-Checking with Semantic Triples and Knowledge Graphs. In Proceedings of the 1st Workshop on Knowledge Graphs and Large Language Models (KaLLM 2024), pages 105–115, Bangkok, Thailand. Association for Computational Linguistics.
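
As a rough illustration of the zero-shot recipe described in the abstract (verbalise claim and evidence triples, then score them with a model trained for natural language inference), here is a minimal Python sketch. It is not the authors' implementation: the triple extraction and knowledge-graph augmentation steps are omitted, the `roberta-large-mnli` checkpoint and the `verify` aggregation rule are illustrative assumptions, and the triples are hand-written toy data.

```python
# Minimal sketch of NLI-over-triples claim verification, assuming hand-written
# (subject, relation, object) triples; the paper's triple extraction and
# knowledge-graph augmentation are NOT shown. The checkpoint and the
# aggregation rule below are illustrative choices, not the authors' setup.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL = "roberta-large-mnli"  # any off-the-shelf NLI model would do
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL).eval()

def nli_label(premise: str, hypothesis: str) -> str:
    """Return ENTAILMENT / NEUTRAL / CONTRADICTION for a premise-hypothesis pair."""
    inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[int(logits.argmax(dim=-1))]

def verify(claim_triples, evidence_triples) -> str:
    """Aggregate per-triple NLI verdicts into a FEVER-style label (toy rule)."""
    # Verbalise triples back into short sentences so the NLI model can read them.
    premise = ". ".join(" ".join(t) for t in evidence_triples)
    verdicts = [nli_label(premise, " ".join(t)) for t in claim_triples]
    if any(v == "CONTRADICTION" for v in verdicts):
        return "REFUTES"
    if all(v == "ENTAILMENT" for v in verdicts):
        return "SUPPORTS"
    return "NOT ENOUGH INFO"

if __name__ == "__main__":
    evidence = [("Barack Obama", "was born in", "Honolulu, Hawaii")]
    claim = [("Barack Obama", "was born in", "Kenya")]
    print(verify(claim, evidence))  # expected: REFUTES
```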