@inproceedings{liu-etal-2020-fine,
title = "Fine-grained Fact Verification with Kernel Graph Attention Network",
author = "Liu, Zhenghao and
Xiong, Chenyan and
Sun, Maosong and
Liu, Zhiyuan",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.acl-main.655",
doi = "10.18653/v1/2020.acl-main.655",
pages = "7342--7351",
abstract = "Fact Verification requires fine-grained natural language inference capability that finds subtle clues to identify the syntactical and semantically correct but not well-supported claims. This paper presents Kernel Graph Attention Network (KGAT), which conducts more fine-grained fact verification with kernel-based attentions. Given a claim and a set of potential evidence sentences that form an evidence graph, KGAT introduces node kernels, which better measure the importance of the evidence node, and edge kernels, which conduct fine-grained evidence propagation in the graph, into Graph Attention Networks for more accurate fact verification. KGAT achieves a 70.38{\%} FEVER score and significantly outperforms existing fact verification models on FEVER, a large-scale benchmark for fact verification. Our analyses illustrate that, compared to dot-product attentions, the kernel-based attention concentrates more on relevant evidence sentences and meaningful clues in the evidence graph, which is the main source of KGAT{'}s effectiveness. All source codes of this work are available at https://github.com/thunlp/KernelGAT.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="liu-etal-2020-fine">
<titleInfo>
<title>Fine-grained Fact Verification with Kernel Graph Attention Network</title>
</titleInfo>
<name type="personal">
<namePart type="given">Zhenghao</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chenyan</namePart>
<namePart type="family">Xiong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maosong</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhiyuan</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Fact Verification requires fine-grained natural language inference capability that finds subtle clues to identify the syntactically and semantically correct but not well-supported claims. This paper presents Kernel Graph Attention Network (KGAT), which conducts more fine-grained fact verification with kernel-based attentions. Given a claim and a set of potential evidence sentences that form an evidence graph, KGAT introduces node kernels, which better measure the importance of the evidence node, and edge kernels, which conduct fine-grained evidence propagation in the graph, into Graph Attention Networks for more accurate fact verification. KGAT achieves a 70.38% FEVER score and significantly outperforms existing fact verification models on FEVER, a large-scale benchmark for fact verification. Our analyses illustrate that, compared to dot-product attentions, the kernel-based attention concentrates more on relevant evidence sentences and meaningful clues in the evidence graph, which is the main source of KGAT’s effectiveness. All source codes of this work are available at https://github.com/thunlp/KernelGAT.</abstract>
<identifier type="citekey">liu-etal-2020-fine</identifier>
<identifier type="doi">10.18653/v1/2020.acl-main.655</identifier>
<location>
<url>https://aclanthology.org/2020.acl-main.655</url>
</location>
<part>
<date>2020-07</date>
<extent unit="page">
<start>7342</start>
<end>7351</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Fine-grained Fact Verification with Kernel Graph Attention Network
%A Liu, Zhenghao
%A Xiong, Chenyan
%A Sun, Maosong
%A Liu, Zhiyuan
%S Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics
%D 2020
%8 July
%I Association for Computational Linguistics
%C Online
%F liu-etal-2020-fine
%X Fact Verification requires fine-grained natural language inference capability that finds subtle clues to identify the syntactically and semantically correct but not well-supported claims. This paper presents Kernel Graph Attention Network (KGAT), which conducts more fine-grained fact verification with kernel-based attentions. Given a claim and a set of potential evidence sentences that form an evidence graph, KGAT introduces node kernels, which better measure the importance of the evidence node, and edge kernels, which conduct fine-grained evidence propagation in the graph, into Graph Attention Networks for more accurate fact verification. KGAT achieves a 70.38% FEVER score and significantly outperforms existing fact verification models on FEVER, a large-scale benchmark for fact verification. Our analyses illustrate that, compared to dot-product attentions, the kernel-based attention concentrates more on relevant evidence sentences and meaningful clues in the evidence graph, which is the main source of KGAT’s effectiveness. All source codes of this work are available at https://github.com/thunlp/KernelGAT.
%R 10.18653/v1/2020.acl-main.655
%U https://aclanthology.org/2020.acl-main.655
%U https://doi.org/10.18653/v1/2020.acl-main.655
%P 7342-7351
Markdown (Informal)
[Fine-grained Fact Verification with Kernel Graph Attention Network](https://aclanthology.org/2020.acl-main.655) (Liu et al., ACL 2020)
ACL
Zhenghao Liu, Chenyan Xiong, Maosong Sun, and Zhiyuan Liu. 2020. Fine-grained Fact Verification with Kernel Graph Attention Network. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7342–7351, Online. Association for Computational Linguistics.